Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[MIPS] SMP: Call platform methods via ops structure.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

+714 -518
+5
arch/mips/Kconfig
··· 1441 1441 select SMP 1442 1442 select SYS_SUPPORTS_SCHED_SMT if SMP 1443 1443 select SYS_SUPPORTS_SMP 1444 + select SMP_UP 1444 1445 help 1445 1446 This is a kernel model which is also known a VSMP or lately 1446 1447 has been marketesed into SMVP. ··· 1458 1457 select NR_CPUS_DEFAULT_8 1459 1458 select SMP 1460 1459 select SYS_SUPPORTS_SMP 1460 + select SMP_UP 1461 1461 help 1462 1462 This is a kernel model which is known a SMTC or lately has been 1463 1463 marketesed into SMVP. ··· 1736 1734 available at <http://www.tldp.org/docs.html#howto>. 1737 1735 1738 1736 If you don't know what to do here, say N. 1737 + 1738 + config SMP_UP 1739 + bool 1739 1740 1740 1741 config SYS_SUPPORTS_SMP 1741 1742 bool
+8
arch/mips/fw/arc/init.c
··· 12 12 13 13 #include <asm/bootinfo.h> 14 14 #include <asm/sgialib.h> 15 + #include <asm/smp-ops.h> 15 16 16 17 #undef DEBUG_PROM_INIT 17 18 ··· 48 47 pr_info("Press a key to reboot\n"); 49 48 ArcRead(0, &c, 1, &cnt); 50 49 ArcEnterInteractiveMode(); 50 + #endif 51 + #ifdef CONFIG_SGI_IP27 52 + { 53 + extern struct plat_smp_ops ip27_smp_ops; 54 + 55 + register_smp_ops(&ip27_smp_ops); 56 + } 51 57 #endif 52 58 }
-1
arch/mips/kernel/mips-mt.c
··· 17 17 #include <asm/system.h> 18 18 #include <asm/hardirq.h> 19 19 #include <asm/mmu_context.h> 20 - #include <asm/smp.h> 21 20 #include <asm/mipsmtregs.h> 22 21 #include <asm/r4kcache.h> 23 22 #include <asm/cacheflush.h>
+1 -2
arch/mips/kernel/setup.c
··· 29 29 #include <asm/cpu.h> 30 30 #include <asm/sections.h> 31 31 #include <asm/setup.h> 32 + #include <asm/smp-ops.h> 32 33 #include <asm/system.h> 33 34 34 35 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; ··· 576 575 arch_mem_init(cmdline_p); 577 576 578 577 resource_init(); 579 - #ifdef CONFIG_SMP 580 578 plat_smp_setup(); 581 - #endif 582 579 } 583 580 584 581 static int __init fpu_disable(char *s)
+117 -98
arch/mips/kernel/smp-mt.c
··· 215 215 write_tc_c0_tchalt(TCHALT_H); 216 216 } 217 217 218 + static void vsmp_send_ipi_single(int cpu, unsigned int action) 219 + { 220 + int i; 221 + unsigned long flags; 222 + int vpflags; 223 + 224 + local_irq_save(flags); 225 + 226 + vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ 227 + 228 + switch (action) { 229 + case SMP_CALL_FUNCTION: 230 + i = C_SW1; 231 + break; 232 + 233 + case SMP_RESCHEDULE_YOURSELF: 234 + default: 235 + i = C_SW0; 236 + break; 237 + } 238 + 239 + /* 1:1 mapping of vpe and tc... */ 240 + settc(cpu); 241 + write_vpe_c0_cause(read_vpe_c0_cause() | i); 242 + evpe(vpflags); 243 + 244 + local_irq_restore(flags); 245 + } 246 + 247 + static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action) 248 + { 249 + unsigned int i; 250 + 251 + for_each_cpu_mask(i, mask) 252 + vsmp_send_ipi_single(i, action); 253 + } 254 + 255 + static void __cpuinit vsmp_init_secondary(void) 256 + { 257 + /* Enable per-cpu interrupts */ 258 + 259 + /* This is Malta specific: IPI,performance and timer inetrrupts */ 260 + write_c0_status((read_c0_status() & ~ST0_IM ) | 261 + (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7)); 262 + } 263 + 264 + static void __cpuinit vsmp_smp_finish(void) 265 + { 266 + write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); 267 + 268 + #ifdef CONFIG_MIPS_MT_FPAFF 269 + /* If we have an FPU, enroll ourselves in the FPU-full mask */ 270 + if (cpu_has_fpu) 271 + cpu_set(smp_processor_id(), mt_fpu_cpumask); 272 + #endif /* CONFIG_MIPS_MT_FPAFF */ 273 + 274 + local_irq_enable(); 275 + } 276 + 277 + static void vsmp_cpus_done(void) 278 + { 279 + } 280 + 281 + /* 282 + * Setup the PC, SP, and GP of a secondary processor and start it 283 + * running! 
284 + * smp_bootstrap is the place to resume from 285 + * __KSTK_TOS(idle) is apparently the stack pointer 286 + * (unsigned long)idle->thread_info the gp 287 + * assumes a 1:1 mapping of TC => VPE 288 + */ 289 + static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle) 290 + { 291 + struct thread_info *gp = task_thread_info(idle); 292 + dvpe(); 293 + set_c0_mvpcontrol(MVPCONTROL_VPC); 294 + 295 + settc(cpu); 296 + 297 + /* restart */ 298 + write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); 299 + 300 + /* enable the tc this vpe/cpu will be running */ 301 + write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A); 302 + 303 + write_tc_c0_tchalt(0); 304 + 305 + /* enable the VPE */ 306 + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); 307 + 308 + /* stack pointer */ 309 + write_tc_gpr_sp( __KSTK_TOS(idle)); 310 + 311 + /* global pointer */ 312 + write_tc_gpr_gp((unsigned long)gp); 313 + 314 + flush_icache_range((unsigned long)gp, 315 + (unsigned long)(gp + sizeof(struct thread_info))); 316 + 317 + /* finally out of configuration and into chaos */ 318 + clear_c0_mvpcontrol(MVPCONTROL_VPC); 319 + 320 + evpe(EVPE_ENABLE); 321 + } 322 + 218 323 /* 219 324 * Common setup before any secondaries are started 220 325 * Make sure all CPU's are in a sensible state before we boot any of the 221 326 * secondarys 222 327 */ 223 - void __init plat_smp_setup(void) 328 + static void __init vsmp_smp_setup(void) 224 329 { 225 330 unsigned int mvpconf0, ntc, tc, ncpu = 0; 226 331 unsigned int nvpe; ··· 368 263 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu); 369 264 } 370 265 371 - void __init plat_prepare_cpus(unsigned int max_cpus) 266 + static void __init vsmp_prepare_cpus(unsigned int max_cpus) 372 267 { 373 268 mips_mt_set_cpuoptions(); 374 269 ··· 388 283 set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq); 389 284 } 390 285 391 - /* 392 - * Setup the PC, SP, and GP of a secondary processor and start it 393 
- * running! 394 - * smp_bootstrap is the place to resume from 395 - * __KSTK_TOS(idle) is apparently the stack pointer 396 - * (unsigned long)idle->thread_info the gp 397 - * assumes a 1:1 mapping of TC => VPE 398 - */ 399 - void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 400 - { 401 - struct thread_info *gp = task_thread_info(idle); 402 - dvpe(); 403 - set_c0_mvpcontrol(MVPCONTROL_VPC); 404 - 405 - settc(cpu); 406 - 407 - /* restart */ 408 - write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); 409 - 410 - /* enable the tc this vpe/cpu will be running */ 411 - write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A); 412 - 413 - write_tc_c0_tchalt(0); 414 - 415 - /* enable the VPE */ 416 - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); 417 - 418 - /* stack pointer */ 419 - write_tc_gpr_sp( __KSTK_TOS(idle)); 420 - 421 - /* global pointer */ 422 - write_tc_gpr_gp((unsigned long)gp); 423 - 424 - flush_icache_range((unsigned long)gp, 425 - (unsigned long)(gp + sizeof(struct thread_info))); 426 - 427 - /* finally out of configuration and into chaos */ 428 - clear_c0_mvpcontrol(MVPCONTROL_VPC); 429 - 430 - evpe(EVPE_ENABLE); 431 - } 432 - 433 - void __cpuinit prom_init_secondary(void) 434 - { 435 - /* Enable per-cpu interrupts */ 436 - 437 - /* This is Malta specific: IPI,performance and timer inetrrupts */ 438 - write_c0_status((read_c0_status() & ~ST0_IM ) | 439 - (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7)); 440 - } 441 - 442 - void __cpuinit prom_smp_finish(void) 443 - { 444 - write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); 445 - 446 - #ifdef CONFIG_MIPS_MT_FPAFF 447 - /* If we have an FPU, enroll ourselves in the FPU-full mask */ 448 - if (cpu_has_fpu) 449 - cpu_set(smp_processor_id(), mt_fpu_cpumask); 450 - #endif /* CONFIG_MIPS_MT_FPAFF */ 451 - 452 - local_irq_enable(); 453 - } 454 - 455 - void prom_cpus_done(void) 456 - { 457 - } 458 - 459 - void core_send_ipi(int cpu, 
unsigned int action) 460 - { 461 - int i; 462 - unsigned long flags; 463 - int vpflags; 464 - 465 - local_irq_save(flags); 466 - 467 - vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ 468 - 469 - switch (action) { 470 - case SMP_CALL_FUNCTION: 471 - i = C_SW1; 472 - break; 473 - 474 - case SMP_RESCHEDULE_YOURSELF: 475 - default: 476 - i = C_SW0; 477 - break; 478 - } 479 - 480 - /* 1:1 mapping of vpe and tc... */ 481 - settc(cpu); 482 - write_vpe_c0_cause(read_vpe_c0_cause() | i); 483 - evpe(vpflags); 484 - 485 - local_irq_restore(flags); 486 - } 286 + struct plat_smp_ops vsmp_smp_ops = { 287 + .send_ipi_single = vsmp_send_ipi_single, 288 + .send_ipi_mask = vsmp_send_ipi_mask, 289 + .init_secondary = vsmp_init_secondary, 290 + .smp_finish = vsmp_smp_finish, 291 + .cpus_done = vsmp_cpus_done, 292 + .boot_secondary = vsmp_boot_secondary, 293 + .smp_setup = vsmp_smp_setup, 294 + .prepare_cpus = vsmp_prepare_cpus, 295 + };
+16 -7
arch/mips/kernel/smp.c
··· 37 37 #include <asm/processor.h> 38 38 #include <asm/system.h> 39 39 #include <asm/mmu_context.h> 40 - #include <asm/smp.h> 41 40 #include <asm/time.h> 42 41 43 42 #ifdef CONFIG_MIPS_MT_SMTC ··· 83 84 cpu_set(cpu, cpu_sibling_map[cpu]); 84 85 } 85 86 87 + struct plat_smp_ops *mp_ops; 88 + 89 + __cpuinit void register_smp_ops(struct plat_smp_ops *ops) 90 + { 91 + if (mp_ops) 92 + printk(KERN_WARNING "Overriding previous set SMP ops\n"); 93 + 94 + mp_ops = ops; 95 + } 96 + 86 97 /* 87 98 * First C code run on the secondary CPUs after being started up by 88 99 * the master. ··· 109 100 cpu_report(); 110 101 per_cpu_trap_init(); 111 102 mips_clockevent_init(); 112 - prom_init_secondary(); 103 + mp_ops->init_secondary(); 113 104 114 105 /* 115 106 * XXX parity protection should be folded in here when it's converted ··· 121 112 cpu = smp_processor_id(); 122 113 cpu_data[cpu].udelay_val = loops_per_jiffy; 123 114 124 - prom_smp_finish(); 115 + mp_ops->smp_finish(); 125 116 set_cpu_sibling_map(cpu); 126 117 127 118 cpu_set(cpu, cpu_callin_map); ··· 193 184 smp_mb(); 194 185 195 186 /* Send a message to all other CPUs and wait for them to respond */ 196 - core_send_ipi_mask(mask, SMP_CALL_FUNCTION); 187 + mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); 197 188 198 189 /* Wait for response */ 199 190 /* FIXME: lock-up detection, backtrace on lock-up */ ··· 287 278 288 279 void __init smp_cpus_done(unsigned int max_cpus) 289 280 { 290 - prom_cpus_done(); 281 + mp_ops->cpus_done(); 291 282 } 292 283 293 284 /* called from main before smp_init() */ ··· 295 286 { 296 287 init_new_context(current, &init_mm); 297 288 current_thread_info()->cpu = 0; 298 - plat_prepare_cpus(max_cpus); 289 + mp_ops->prepare_cpus(max_cpus); 299 290 set_cpu_sibling_map(0); 300 291 #ifndef CONFIG_HOTPLUG_CPU 301 292 cpu_present_map = cpu_possible_map; ··· 334 325 if (IS_ERR(idle)) 335 326 panic(KERN_ERR "Fork failed for CPU %d", cpu); 336 327 337 - prom_boot_secondary(cpu, idle); 328 + 
mp_ops->boot_secondary(cpu, idle); 338 329 339 330 /* 340 331 * Trust is futile. We should really have timeouts ...
-1
arch/mips/kernel/smtc-proc.c
··· 14 14 #include <asm/system.h> 15 15 #include <asm/hardirq.h> 16 16 #include <asm/mmu_context.h> 17 - #include <asm/smp.h> 18 17 #include <asm/mipsregs.h> 19 18 #include <asm/cacheflush.h> 20 19 #include <linux/proc_fs.h>
-1
arch/mips/kernel/smtc.c
··· 16 16 #include <asm/hazards.h> 17 17 #include <asm/irq.h> 18 18 #include <asm/mmu_context.h> 19 - #include <asm/smp.h> 20 19 #include <asm/mipsregs.h> 21 20 #include <asm/cacheflush.h> 22 21 #include <asm/time.h>
+8
arch/mips/mips-boards/generic/init.c
··· 250 250 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); 251 251 } 252 252 253 + extern struct plat_smp_ops msmtc_smp_ops; 254 + 253 255 void __init prom_init(void) 254 256 { 255 257 prom_argc = fw_arg0; ··· 417 415 prom_meminit(); 418 416 #ifdef CONFIG_SERIAL_8250_CONSOLE 419 417 console_config(); 418 + #endif 419 + #ifdef CONFIG_MIPS_MT_SMP 420 + register_smp_ops(&vsmp_smp_ops); 421 + #endif 422 + #ifdef CONFIG_MIPS_MT_SMTC 423 + register_smp_ops(&msmtc_smp_ops); 420 424 #endif 421 425 }
+46 -32
arch/mips/mips-boards/malta/malta_smtc.c
··· 15 15 * Cause the specified action to be performed on a targeted "CPU" 16 16 */ 17 17 18 - void core_send_ipi(int cpu, unsigned int action) 18 + static void msmtc_send_ipi_single(int cpu, unsigned int action) 19 19 { 20 20 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 21 21 smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 22 22 } 23 23 24 - /* 25 - * Platform "CPU" startup hook 26 - */ 27 - 28 - void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 24 + static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action) 29 25 { 30 - smtc_boot_secondary(cpu, idle); 26 + unsigned int i; 27 + 28 + for_each_cpu_mask(i, mask) 29 + msmtc_send_ipi_single(i, action); 31 30 } 32 31 33 32 /* 34 33 * Post-config but pre-boot cleanup entry point 35 34 */ 36 - 37 - void __cpuinit prom_init_secondary(void) 35 + static void __cpuinit msmtc_init_secondary(void) 38 36 { 39 37 void smtc_init_secondary(void); 40 38 int myvpe; ··· 48 50 set_c0_status(0x100 << cp0_perfcount_irq); 49 51 } 50 52 51 - smtc_init_secondary(); 53 + smtc_init_secondary(); 54 + } 55 + 56 + /* 57 + * Platform "CPU" startup hook 58 + */ 59 + static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle) 60 + { 61 + smtc_boot_secondary(cpu, idle); 62 + } 63 + 64 + /* 65 + * SMP initialization finalization entry point 66 + */ 67 + static void __cpuinit msmtc_smp_finish(void) 68 + { 69 + smtc_smp_finish(); 70 + } 71 + 72 + /* 73 + * Hook for after all CPUs are online 74 + */ 75 + 76 + static void msmtc_cpus_done(void) 77 + { 52 78 } 53 79 54 80 /* ··· 82 60 * but it may be multithreaded. 
83 61 */ 84 62 85 - void __cpuinit plat_smp_setup(void) 63 + static void __init msmtc_smp_setup(void) 86 64 { 87 - if (read_c0_config3() & (1<<2)) 88 - mipsmt_build_cpu_map(0); 65 + mipsmt_build_cpu_map(0); 89 66 } 90 67 91 - void __init plat_prepare_cpus(unsigned int max_cpus) 68 + static void __init msmtc_prepare_cpus(unsigned int max_cpus) 92 69 { 93 - if (read_c0_config3() & (1<<2)) 94 - mipsmt_prepare_cpus(); 70 + mipsmt_prepare_cpus(); 95 71 } 96 72 97 - /* 98 - * SMP initialization finalization entry point 99 - */ 100 - 101 - void __cpuinit prom_smp_finish(void) 102 - { 103 - smtc_smp_finish(); 104 - } 105 - 106 - /* 107 - * Hook for after all CPUs are online 108 - */ 109 - 110 - void prom_cpus_done(void) 111 - { 112 - } 73 + struct plat_smp_ops msmtc_smp_ops = { 74 + .send_ipi_single = msmtc_send_ipi_single, 75 + .send_ipi_mask = msmtc_send_ipi_mask, 76 + .init_secondary = msmtc_init_secondary, 77 + .smp_finish = msmtc_smp_finish, 78 + .cpus_done = msmtc_cpus_done, 79 + .boot_secondary = msmtc_boot_secondary, 80 + .smp_setup = msmtc_smp_setup, 81 + .prepare_cpus = msmtc_prepare_cpus, 82 + }; 113 83 114 84 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 115 85 /*
+1 -1
arch/mips/mipssim/Makefile
··· 21 21 sim_cmdline.o 22 22 23 23 obj-$(CONFIG_EARLY_PRINTK) += sim_console.o 24 - obj-$(CONFIG_SMP) += sim_smp.o 24 + obj-$(CONFIG_MIPS_MT_SMTC) += sim_smtc.o 25 25 26 26 EXTRA_CFLAGS += -Werror
+15 -1
arch/mips/mipssim/sim_setup.c
··· 60 60 #endif 61 61 } 62 62 63 + extern struct plat_smp_ops ssmtc_smp_ops; 64 + 63 65 void __init prom_init(void) 64 66 { 65 67 set_io_port_base(0xbfd00000); ··· 69 67 pr_info("\nLINUX started...\n"); 70 68 prom_init_cmdline(); 71 69 prom_meminit(); 72 - } 73 70 71 + #ifdef CONFIG_MIPS_MT_SMP 72 + if (cpu_has_mipsmt) 73 + register_smp_ops(&vsmp_smp_ops); 74 + else 75 + register_smp_ops(&up_smp_ops); 76 + #endif 77 + #ifdef CONFIG_MIPS_MT_SMTC 78 + if (cpu_has_mipsmt) 79 + register_smp_ops(&ssmtc_smp_ops); 80 + else 81 + register_smp_ops(&up_smp_ops); 82 + #endif 83 + } 74 84 75 85 static void __init serial_init(void) 76 86 {
+44 -50
arch/mips/mipssim/sim_smp.c arch/mips/mipssim/sim_smtc.c
··· 16 16 * 17 17 */ 18 18 /* 19 - * Simulator Platform-specific hooks for SMP operation 19 + * Simulator Platform-specific hooks for SMTC operation 20 20 */ 21 21 #include <linux/kernel.h> 22 22 #include <linux/sched.h> ··· 29 29 #include <asm/processor.h> 30 30 #include <asm/system.h> 31 31 #include <asm/mmu_context.h> 32 - #ifdef CONFIG_MIPS_MT_SMTC 33 32 #include <asm/smtc_ipi.h> 34 - #endif /* CONFIG_MIPS_MT_SMTC */ 35 33 36 34 /* VPE/SMP Prototype implements platform interfaces directly */ 37 - #if !defined(CONFIG_MIPS_MT_SMP) 38 35 39 36 /* 40 37 * Cause the specified action to be performed on a targeted "CPU" 41 38 */ 42 39 43 - void core_send_ipi(int cpu, unsigned int action) 40 + static void ssmtc_send_ipi_single(int cpu, unsigned int action) 44 41 { 45 - #ifdef CONFIG_MIPS_MT_SMTC 46 42 smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 47 - #endif /* CONFIG_MIPS_MT_SMTC */ 48 - /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 49 - 43 + /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 50 44 } 51 45 52 - /* 53 - * Platform "CPU" startup hook 54 - */ 55 - 56 - void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 46 + static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action) 57 47 { 58 - #ifdef CONFIG_MIPS_MT_SMTC 59 - smtc_boot_secondary(cpu, idle); 60 - #endif /* CONFIG_MIPS_MT_SMTC */ 48 + unsigned int i; 49 + 50 + for_each_cpu_mask(i, mask) 51 + ssmtc_send_ipi_single(i, action); 61 52 } 62 53 63 54 /* 64 55 * Post-config but pre-boot cleanup entry point 65 56 */ 66 - 67 - void __cpuinit prom_init_secondary(void) 57 + static void __cpuinit ssmtc_init_secondary(void) 68 58 { 69 - #ifdef CONFIG_MIPS_MT_SMTC 70 59 void smtc_init_secondary(void); 71 60 72 61 smtc_init_secondary(); 73 - #endif /* CONFIG_MIPS_MT_SMTC */ 74 62 } 75 63 76 - void plat_smp_setup(void) 64 + /* 65 + * SMP initialization finalization entry point 66 + */ 67 + static void __cpuinit ssmtc_smp_finish(void) 77 68 { 78 - 
#ifdef CONFIG_MIPS_MT_SMTC 69 + smtc_smp_finish(); 70 + } 71 + 72 + /* 73 + * Hook for after all CPUs are online 74 + */ 75 + static void ssmtc_cpus_done(void) 76 + { 77 + } 78 + 79 + /* 80 + * Platform "CPU" startup hook 81 + */ 82 + static void __cpuinit ssmtc_boot_secondary(int cpu, struct task_struct *idle) 83 + { 84 + smtc_boot_secondary(cpu, idle); 85 + } 86 + 87 + static void __init ssmtc_smp_setup(void) 88 + { 79 89 if (read_c0_config3() & (1 << 2)) 80 90 mipsmt_build_cpu_map(0); 81 - #endif /* CONFIG_MIPS_MT_SMTC */ 82 91 } 83 92 84 93 /* 85 94 * Platform SMP pre-initialization 86 95 */ 87 - 88 - void plat_prepare_cpus(unsigned int max_cpus) 96 + static void ssmtc_prepare_cpus(unsigned int max_cpus) 89 97 { 90 - #ifdef CONFIG_MIPS_MT_SMTC 91 98 /* 92 99 * As noted above, we can assume a single CPU for now 93 100 * but it may be multithreaded. ··· 103 96 if (read_c0_config3() & (1 << 2)) { 104 97 mipsmt_prepare_cpus(); 105 98 } 106 - #endif /* CONFIG_MIPS_MT_SMTC */ 107 99 } 108 100 109 - /* 110 - * SMP initialization finalization entry point 111 - */ 112 - 113 - void __cpuinit prom_smp_finish(void) 114 - { 115 - #ifdef CONFIG_MIPS_MT_SMTC 116 - smtc_smp_finish(); 117 - #endif /* CONFIG_MIPS_MT_SMTC */ 118 - } 119 - 120 - /* 121 - * Hook for after all CPUs are online 122 - */ 123 - 124 - void prom_cpus_done(void) 125 - { 126 - #ifdef CONFIG_MIPS_MT_SMTC 127 - 128 - #endif /* CONFIG_MIPS_MT_SMTC */ 129 - } 130 - #endif /* CONFIG_MIPS32R2_MT_SMP */ 101 + struct plat_smp_ops ssmtc_smp_ops = { 102 + .send_ipi_single = ssmtc_send_ipi_single, 103 + .send_ipi_mask = ssmtc_send_ipi_mask, 104 + .init_secondary = ssmtc_init_secondary, 105 + .smp_finish = ssmtc_smp_finish, 106 + .cpus_done = ssmtc_cpus_done, 107 + .boot_secondary = ssmtc_boot_secondary, 108 + .smp_setup = ssmtc_smp_setup, 109 + .prepare_cpus = ssmtc_prepare_cpus, 110 + };
+5
arch/mips/pmc-sierra/yosemite/prom.c
··· 19 19 #include <asm/pgtable.h> 20 20 #include <asm/processor.h> 21 21 #include <asm/reboot.h> 22 + #include <asm/smp-ops.h> 22 23 #include <asm/system.h> 23 24 #include <asm/bootinfo.h> 24 25 #include <asm/pmon.h> ··· 79 78 __asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0"); 80 79 } 81 80 81 + extern struct plat_smp_ops yos_smp_ops; 82 + 82 83 /* 83 84 * Init routine which accepts the variables from PMON 84 85 */ ··· 130 127 } 131 128 132 129 prom_grab_secondary(); 130 + 131 + register_smp_ops(&yos_smp_ops); 133 132 } 134 133 135 134 void __init prom_free_prom_memory(void)
+84 -65
arch/mips/pmc-sierra/yosemite/smp.c
··· 42 42 launchstack + LAUNCHSTACK_SIZE, 0); 43 43 } 44 44 45 - /* 46 - * Detect available CPUs, populate phys_cpu_present_map before smp_init 47 - * 48 - * We don't want to start the secondary CPU yet nor do we have a nice probing 49 - * feature in PMON so we just assume presence of the secondary core. 50 - */ 51 - void __init plat_smp_setup(void) 52 - { 53 - int i; 54 - 55 - cpus_clear(phys_cpu_present_map); 56 - 57 - for (i = 0; i < 2; i++) { 58 - cpu_set(i, phys_cpu_present_map); 59 - __cpu_number_map[i] = i; 60 - __cpu_logical_map[i] = i; 61 - } 62 - } 63 - 64 - void __init plat_prepare_cpus(unsigned int max_cpus) 65 - { 66 - /* 67 - * Be paranoid. Enable the IPI only if we're really about to go SMP. 68 - */ 69 - if (cpus_weight(cpu_possible_map)) 70 - set_c0_status(STATUSF_IP5); 71 - } 72 - 73 - /* 74 - * Firmware CPU startup hook 75 - * Complicated by PMON's weird interface which tries to minimic the UNIX fork. 76 - * It launches the next * available CPU and copies some information on the 77 - * stack so the first thing we do is throw away that stuff and load useful 78 - * values into the registers ... 
79 - */ 80 - void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 81 - { 82 - unsigned long gp = (unsigned long) task_thread_info(idle); 83 - unsigned long sp = __KSTK_TOS(idle); 84 - 85 - secondary_sp = sp; 86 - secondary_gp = gp; 87 - 88 - spin_unlock(&launch_lock); 89 - } 90 - 91 - /* Hook for after all CPUs are online */ 92 - void prom_cpus_done(void) 93 - { 94 - } 95 - 96 - /* 97 - * After we've done initial boot, this function is called to allow the 98 - * board code to clean up state, if needed 99 - */ 100 - void __cpuinit prom_init_secondary(void) 101 - { 102 - set_c0_status(ST0_CO | ST0_IE | ST0_IM); 103 - } 104 - 105 - void __cpuinit prom_smp_finish(void) 106 - { 107 - } 108 - 109 45 void titan_mailbox_irq(void) 110 46 { 111 47 int cpu = smp_processor_id(); ··· 69 133 /* 70 134 * Send inter-processor interrupt 71 135 */ 72 - void core_send_ipi(int cpu, unsigned int action) 136 + static void yos_send_ipi_single(int cpu, unsigned int action) 73 137 { 74 138 /* 75 139 * Generate an INTMSG so that it can be sent over to the ··· 95 159 break; 96 160 } 97 161 } 162 + 163 + static void yos_send_ipi_mask(cpumask_t mask, unsigned int action) 164 + { 165 + unsigned int i; 166 + 167 + for_each_cpu_mask(i, mask) 168 + yos_send_ipi_single(i, action); 169 + } 170 + 171 + /* 172 + * After we've done initial boot, this function is called to allow the 173 + * board code to clean up state, if needed 174 + */ 175 + static void __cpuinit yos_init_secondary(void) 176 + { 177 + set_c0_status(ST0_CO | ST0_IE | ST0_IM); 178 + } 179 + 180 + static void __cpuinit yos_smp_finish(void) 181 + { 182 + } 183 + 184 + /* Hook for after all CPUs are online */ 185 + static void yos_cpus_done(void) 186 + { 187 + } 188 + 189 + /* 190 + * Firmware CPU startup hook 191 + * Complicated by PMON's weird interface which tries to minimic the UNIX fork. 
192 + * It launches the next * available CPU and copies some information on the 193 + * stack so the first thing we do is throw away that stuff and load useful 194 + * values into the registers ... 195 + */ 196 + static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle) 197 + { 198 + unsigned long gp = (unsigned long) task_thread_info(idle); 199 + unsigned long sp = __KSTK_TOS(idle); 200 + 201 + secondary_sp = sp; 202 + secondary_gp = gp; 203 + 204 + spin_unlock(&launch_lock); 205 + } 206 + 207 + /* 208 + * Detect available CPUs, populate phys_cpu_present_map before smp_init 209 + * 210 + * We don't want to start the secondary CPU yet nor do we have a nice probing 211 + * feature in PMON so we just assume presence of the secondary core. 212 + */ 213 + static void __init yos_smp_setup(void) 214 + { 215 + int i; 216 + 217 + cpus_clear(phys_cpu_present_map); 218 + 219 + for (i = 0; i < 2; i++) { 220 + cpu_set(i, phys_cpu_present_map); 221 + __cpu_number_map[i] = i; 222 + __cpu_logical_map[i] = i; 223 + } 224 + } 225 + 226 + static void __init yos_prepare_cpus(unsigned int max_cpus) 227 + { 228 + /* 229 + * Be paranoid. Enable the IPI only if we're really about to go SMP. 230 + */ 231 + if (cpus_weight(cpu_possible_map)) 232 + set_c0_status(STATUSF_IP5); 233 + } 234 + 235 + struct plat_smp_ops yos_smp_ops = { 236 + .send_ipi_single = yos_send_ipi_single, 237 + .send_ipi_mask = yos_send_ipi_mask, 238 + .init_secondary = yos_init_secondary, 239 + .smp_finish = yos_smp_finish, 240 + .cpus_done = yos_cpus_done, 241 + .boot_secondary = yos_boot_secondary, 242 + .smp_setup = yos_smp_setup, 243 + .prepare_cpus = yos_prepare_cpus, 244 + };
+26 -14
arch/mips/qemu/q-smp.c
··· 3 3 * License. See the file "COPYING" in the main directory of this archive 4 4 * for more details. 5 5 * 6 - * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) 6 + * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org) 7 7 * 8 8 * Symmetric Uniprocessor (TM) Support 9 9 */ ··· 13 13 /* 14 14 * Send inter-processor interrupt 15 15 */ 16 - void core_send_ipi(int cpu, unsigned int action) 16 + void up_send_ipi_single(int cpu, unsigned int action) 17 17 { 18 - panic(KERN_ERR "%s called", __FUNCTION__); 18 + panic(KERN_ERR "%s called", __func__); 19 + } 20 + 21 + static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action) 22 + { 23 + panic(KERN_ERR "%s called", __func__); 19 24 } 20 25 21 26 /* 22 27 * After we've done initial boot, this function is called to allow the 23 28 * board code to clean up state, if needed 24 29 */ 25 - void __cpuinit prom_init_secondary(void) 30 + void __cpuinit up_init_secondary(void) 26 31 { 27 32 } 28 33 29 - void __cpuinit prom_smp_finish(void) 34 + void __cpuinit up_smp_finish(void) 30 35 { 31 36 } 32 37 33 38 /* Hook for after all CPUs are online */ 34 - void prom_cpus_done(void) 39 + void up_cpus_done(void) 35 40 { 36 - } 37 - 38 - void __init prom_prepare_cpus(unsigned int max_cpus) 39 - { 40 - cpus_clear(phys_cpu_present_map); 41 41 } 42 42 43 43 /* 44 44 * Firmware CPU startup hook 45 45 */ 46 - void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 46 + void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle) 47 47 { 48 48 } 49 49 50 - void __init plat_smp_setup(void) 50 + void __init up_smp_setup(void) 51 51 { 52 52 } 53 - void __init plat_prepare_cpus(unsigned int max_cpus) 53 + 54 + void __init up_prepare_cpus(unsigned int max_cpus) 54 55 { 55 56 } 57 + 58 + struct plat_smp_ops up_smp_ops = { 59 + .send_ipi_single = up_send_ipi_single, 60 + .send_ipi_mask = up_send_ipi_mask, 61 + .init_secondary = up_init_secondary, 62 + .smp_finish = up_smp_finish, 63 + .cpus_done = 
up_cpus_done, 64 + .boot_secondary = up_boot_secondary, 65 + .smp_setup = up_smp_setup, 66 + .prepare_cpus = up_prepare_cpus, 67 + };
-1
arch/mips/sgi-ip27/ip27-init.c
··· 27 27 #include <asm/sn/hub.h> 28 28 #include <asm/sn/intr.h> 29 29 #include <asm/current.h> 30 - #include <asm/smp.h> 31 30 #include <asm/processor.h> 32 31 #include <asm/mmu_context.h> 33 32 #include <asm/thread_info.h>
-1
arch/mips/sgi-ip27/ip27-klnuma.c
··· 11 11 12 12 #include <asm/page.h> 13 13 #include <asm/sections.h> 14 - #include <asm/smp.h> 15 14 #include <asm/sn/types.h> 16 15 #include <asm/sn/arch.h> 17 16 #include <asm/sn/gda.h>
+75 -56
arch/mips/sgi-ip27/ip27-smp.c
··· 140 140 REMOTE_HUB_CLR_INTR(nasid, i); 141 141 } 142 142 143 - void __init plat_smp_setup(void) 144 - { 145 - cnodeid_t cnode; 146 - 147 - for_each_online_node(cnode) { 148 - if (cnode == 0) 149 - continue; 150 - intr_clear_all(COMPACT_TO_NASID_NODEID(cnode)); 151 - } 152 - 153 - replicate_kernel_text(); 154 - 155 - /* 156 - * Assumption to be fixed: we're always booted on logical / physical 157 - * processor 0. While we're always running on logical processor 0 158 - * this still means this is physical processor zero; it might for 159 - * example be disabled in the firwware. 160 - */ 161 - alloc_cpupda(0, 0); 162 - } 163 - 164 - void __init plat_prepare_cpus(unsigned int max_cpus) 165 - { 166 - /* We already did everything necessary earlier */ 167 - } 168 - 169 - /* 170 - * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we 171 - * set sp to the kernel stack of the newly created idle process, gp to the proc 172 - * struct so that current_thread_info() will work. 
173 - */ 174 - void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 175 - { 176 - unsigned long gp = (unsigned long)task_thread_info(idle); 177 - unsigned long sp = __KSTK_TOS(idle); 178 - 179 - LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu), 180 - (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap), 181 - 0, (void *) sp, (void *) gp); 182 - } 183 - 184 - void __cpuinit prom_init_secondary(void) 185 - { 186 - per_cpu_init(); 187 - local_irq_enable(); 188 - } 189 - 190 - void __init prom_cpus_done(void) 191 - { 192 - } 193 - 194 - void __cpuinit prom_smp_finish(void) 195 - { 196 - } 197 - 198 - void core_send_ipi(int destid, unsigned int action) 143 + static void ip27_send_ipi_single(int destid, unsigned int action) 199 144 { 200 145 int irq; 201 146 ··· 164 219 */ 165 220 REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq); 166 221 } 222 + 223 + static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action) 224 + { 225 + unsigned int i; 226 + 227 + for_each_cpu_mask(i, mask) 228 + ip27_send_ipi_single(i, action); 229 + } 230 + 231 + static void __cpuinit ip27_init_secondary(void) 232 + { 233 + per_cpu_init(); 234 + local_irq_enable(); 235 + } 236 + 237 + static void __cpuinit ip27_smp_finish(void) 238 + { 239 + } 240 + 241 + static void __init ip27_cpus_done(void) 242 + { 243 + } 244 + 245 + /* 246 + * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we 247 + * set sp to the kernel stack of the newly created idle process, gp to the proc 248 + * struct so that current_thread_info() will work. 
249 + */ 250 + static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle) 251 + { 252 + unsigned long gp = (unsigned long)task_thread_info(idle); 253 + unsigned long sp = __KSTK_TOS(idle); 254 + 255 + LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu), 256 + (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap), 257 + 0, (void *) sp, (void *) gp); 258 + } 259 + 260 + static void __init ip27_smp_setup(void) 261 + { 262 + cnodeid_t cnode; 263 + 264 + for_each_online_node(cnode) { 265 + if (cnode == 0) 266 + continue; 267 + intr_clear_all(COMPACT_TO_NASID_NODEID(cnode)); 268 + } 269 + 270 + replicate_kernel_text(); 271 + 272 + /* 273 + * Assumption to be fixed: we're always booted on logical / physical 274 + * processor 0. While we're always running on logical processor 0 275 + * this still means this is physical processor zero; it might for 276 + * example be disabled in the firwware. 277 + */ 278 + alloc_cpupda(0, 0); 279 + } 280 + 281 + static void __init ip27_prepare_cpus(unsigned int max_cpus) 282 + { 283 + /* We already did everything necessary earlier */ 284 + } 285 + 286 + struct plat_smp_ops ip27_smp_ops = { 287 + .send_ipi_single = ip27_send_ipi_single, 288 + .send_ipi_mask = ip27_send_ipi_mask, 289 + .init_secondary = ip27_init_secondary, 290 + .smp_finish = ip27_smp_finish, 291 + .cpus_done = ip27_cpus_done, 292 + .boot_secondary = ip27_boot_secondary, 293 + .smp_setup = ip27_smp_setup, 294 + .prepare_cpus = ip27_prepare_cpus, 295 + };
+96 -9
arch/mips/sibyte/bcm1480/smp.c
··· 23 23 24 24 #include <asm/mmu_context.h> 25 25 #include <asm/io.h> 26 + #include <asm/fw/cfe/cfe_api.h> 26 27 #include <asm/sibyte/sb1250.h> 27 28 #include <asm/sibyte/bcm1480_regs.h> 28 29 #include <asm/sibyte/bcm1480_int.h> ··· 68 67 change_c0_status(ST0_IM, imask); 69 68 } 70 69 71 - void __cpuinit bcm1480_smp_finish(void) 72 - { 73 - extern void sb1480_clockevent_init(void); 74 - 75 - sb1480_clockevent_init(); 76 - local_irq_enable(); 77 - } 78 - 79 70 /* 80 71 * These are routines for dealing with the sb1250 smp capabilities 81 72 * independent of board/firmware ··· 77 84 * Simple enough; everything is set up, so just poke the appropriate mailbox 78 85 * register, and we should be set 79 86 */ 80 - void core_send_ipi(int cpu, unsigned int action) 87 + static void bcm1480_send_ipi_single(int cpu, unsigned int action) 81 88 { 82 89 __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]); 83 90 } 91 + 92 + static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action) 93 + { 94 + unsigned int i; 95 + 96 + for_each_cpu_mask(i, mask) 97 + bcm1480_send_ipi_single(i, action); 98 + } 99 + 100 + /* 101 + * Code to run on secondary just after probing the CPU 102 + */ 103 + static void __cpuinit bcm1480_init_secondary(void) 104 + { 105 + extern void bcm1480_smp_init(void); 106 + 107 + bcm1480_smp_init(); 108 + } 109 + 110 + /* 111 + * Do any tidying up before marking online and running the idle 112 + * loop 113 + */ 114 + static void __cpuinit bcm1480_smp_finish(void) 115 + { 116 + extern void sb1480_clockevent_init(void); 117 + 118 + sb1480_clockevent_init(); 119 + local_irq_enable(); 120 + 121 + } 122 + 123 + /* 124 + * Final cleanup after all secondaries booted 125 + */ 126 + static void bcm1480_cpus_done(void) 127 + { 128 + } 129 + 130 + /* 131 + * Setup the PC, SP, and GP of a secondary processor and start it 132 + * running! 
133 + */ 134 + static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle) 135 + { 136 + int retval; 137 + 138 + retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap, 139 + __KSTK_TOS(idle), 140 + (unsigned long)task_thread_info(idle), 0); 141 + if (retval != 0) 142 + printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); 143 + } 144 + 145 + /* 146 + * Use CFE to find out how many CPUs are available, setting up 147 + * phys_cpu_present_map and the logical/physical mappings. 148 + * XXXKW will the boot CPU ever not be physical 0? 149 + * 150 + * Common setup before any secondaries are started 151 + */ 152 + static void __init bcm1480_smp_setup(void) 153 + { 154 + int i, num; 155 + 156 + cpus_clear(phys_cpu_present_map); 157 + cpu_set(0, phys_cpu_present_map); 158 + __cpu_number_map[0] = 0; 159 + __cpu_logical_map[0] = 0; 160 + 161 + for (i = 1, num = 0; i < NR_CPUS; i++) { 162 + if (cfe_cpu_stop(i) == 0) { 163 + cpu_set(i, phys_cpu_present_map); 164 + __cpu_number_map[i] = ++num; 165 + __cpu_logical_map[num] = i; 166 + } 167 + } 168 + printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); 169 + } 170 + 171 + static void __init bcm1480_prepare_cpus(unsigned int max_cpus) 172 + { 173 + } 174 + 175 + struct plat_smp_ops bcm1480_smp_ops = { 176 + .send_ipi_single = bcm1480_send_ipi_single, 177 + .send_ipi_mask = bcm1480_send_ipi_mask, 178 + .init_secondary = bcm1480_init_secondary, 179 + .smp_finish = bcm1480_smp_finish, 180 + .cpus_done = bcm1480_cpus_done, 181 + .boot_secondary = bcm1480_boot_secondary, 182 + .smp_setup = bcm1480_smp_setup, 183 + .prepare_cpus = bcm1480_prepare_cpus, 184 + }; 84 185 85 186 void bcm1480_mailbox_interrupt(void) 86 187 {
-1
arch/mips/sibyte/cfe/Makefile
··· 1 1 lib-y = setup.o 2 - lib-$(CONFIG_SMP) += smp.o 3 2 lib-$(CONFIG_SIBYTE_CFE_CONSOLE) += console.o
+11
arch/mips/sibyte/cfe/setup.c
··· 28 28 #include <asm/bootinfo.h> 29 29 #include <asm/reboot.h> 30 30 #include <asm/sibyte/board.h> 31 + #include <asm/smp-ops.h> 31 32 32 33 #include <asm/fw/cfe/cfe_api.h> 33 34 #include <asm/fw/cfe/cfe_error.h> ··· 233 232 234 233 #endif 235 234 235 + extern struct plat_smp_ops sb_smp_ops; 236 + extern struct plat_smp_ops bcm1480_smp_ops; 237 + 236 238 /* 237 239 * prom_init is called just after the cpu type is determined, from setup_arch() 238 240 */ ··· 344 340 arcs_cmdline[CL_SIZE-1] = 0; 345 341 346 342 prom_meminit(); 343 + 344 + #if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250) 345 + register_smp_ops(&sb_smp_ops); 346 + #endif 347 + #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) 348 + register_smp_ops(&bcm1480_smp_ops); 349 + #endif 347 350 } 348 351 349 352 void __init prom_free_prom_memory(void)
-110
arch/mips/sibyte/cfe/smp.c
··· 1 - /* 2 - * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation 3 - * 4 - * This program is free software; you can redistribute it and/or 5 - * modify it under the terms of the GNU General Public License 6 - * as published by the Free Software Foundation; either version 2 7 - * of the License, or (at your option) any later version. 8 - * 9 - * This program is distributed in the hope that it will be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write to the Free Software 16 - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 - */ 18 - 19 - #include <linux/init.h> 20 - #include <linux/sched.h> 21 - #include <linux/smp.h> 22 - #include <asm/processor.h> 23 - 24 - #include <asm/fw/cfe/cfe_api.h> 25 - #include <asm/fw/cfe/cfe_error.h> 26 - 27 - /* 28 - * Use CFE to find out how many CPUs are available, setting up 29 - * phys_cpu_present_map and the logical/physical mappings. 30 - * XXXKW will the boot CPU ever not be physical 0? 31 - * 32 - * Common setup before any secondaries are started 33 - */ 34 - void __init plat_smp_setup(void) 35 - { 36 - int i, num; 37 - 38 - cpus_clear(phys_cpu_present_map); 39 - cpu_set(0, phys_cpu_present_map); 40 - __cpu_number_map[0] = 0; 41 - __cpu_logical_map[0] = 0; 42 - 43 - for (i = 1, num = 0; i < NR_CPUS; i++) { 44 - if (cfe_cpu_stop(i) == 0) { 45 - cpu_set(i, phys_cpu_present_map); 46 - __cpu_number_map[i] = ++num; 47 - __cpu_logical_map[num] = i; 48 - } 49 - } 50 - printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); 51 - } 52 - 53 - void __init plat_prepare_cpus(unsigned int max_cpus) 54 - { 55 - } 56 - 57 - /* 58 - * Setup the PC, SP, and GP of a secondary processor and start it 59 - * running! 
60 - */ 61 - void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 62 - { 63 - int retval; 64 - 65 - retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap, 66 - __KSTK_TOS(idle), 67 - (unsigned long)task_thread_info(idle), 0); 68 - if (retval != 0) 69 - printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); 70 - } 71 - 72 - /* 73 - * Code to run on secondary just after probing the CPU 74 - */ 75 - void __cpuinit prom_init_secondary(void) 76 - { 77 - #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) 78 - extern void bcm1480_smp_init(void); 79 - bcm1480_smp_init(); 80 - #elif defined(CONFIG_SIBYTE_SB1250) 81 - extern void sb1250_smp_init(void); 82 - sb1250_smp_init(); 83 - #else 84 - #error invalid SMP configuration 85 - #endif 86 - } 87 - 88 - /* 89 - * Do any tidying up before marking online and running the idle 90 - * loop 91 - */ 92 - void __cpuinit prom_smp_finish(void) 93 - { 94 - #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) 95 - extern void bcm1480_smp_finish(void); 96 - bcm1480_smp_finish(); 97 - #elif defined(CONFIG_SIBYTE_SB1250) 98 - extern void sb1250_smp_finish(void); 99 - sb1250_smp_finish(); 100 - #else 101 - #error invalid SMP configuration 102 - #endif 103 - } 104 - 105 - /* 106 - * Final cleanup after all secondaries booted 107 - */ 108 - void prom_cpus_done(void) 109 - { 110 - }
+95 -9
arch/mips/sibyte/sb1250/smp.c
··· 24 24
25 25 #include <asm/mmu_context.h>
26 26 #include <asm/io.h>
27 + #include <asm/fw/cfe/cfe_api.h>
27 28 #include <asm/sibyte/sb1250.h>
28 29 #include <asm/sibyte/sb1250_regs.h>
29 30 #include <asm/sibyte/sb1250_int.h>
··· 56 55 change_c0_status(ST0_IM, imask);
57 56 }
58 57 
59 - void __cpuinit sb1250_smp_finish(void)
60 - {
61 - extern void sb1250_clockevent_init(void);
62 - 
63 - sb1250_clockevent_init();
64 - local_irq_enable();
65 - }
66 - 
67 58 /*
68 59 * These are routines for dealing with the sb1250 smp capabilities
69 60 * independent of board/firmware
··· 65 72 * Simple enough; everything is set up, so just poke the appropriate mailbox
66 73 * register, and we should be set
67 74 */
68 - void core_send_ipi(int cpu, unsigned int action)
75 + static void sb1250_send_ipi_single(int cpu, unsigned int action)
69 76 {
70 77 __raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
71 78 }
79 + 
80 + static void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
81 + {
82 + unsigned int i;
83 + 
84 + for_each_cpu_mask(i, mask)
85 + sb1250_send_ipi_single(i, action);
86 + }
87 + 
88 + /*
89 + * Code to run on secondary just after probing the CPU
90 + */
91 + static void __cpuinit sb1250_init_secondary(void)
92 + {
93 + extern void sb1250_smp_init(void);
94 + 
95 + sb1250_smp_init();
96 + }
97 + 
98 + /*
99 + * Do any tidying up before marking online and running the idle
100 + * loop
101 + */
102 + static void __cpuinit sb1250_smp_finish(void)
103 + {
104 + extern void sb1250_clockevent_init(void);
105 + 
106 + sb1250_clockevent_init();
107 + local_irq_enable();
108 + }
109 + 
110 + /*
111 + * Final cleanup after all secondaries booted
112 + */
113 + static void sb1250_cpus_done(void)
114 + {
115 + }
116 + 
117 + /*
118 + * Setup the PC, SP, and GP of a secondary processor and start it
119 + * running! 
120 + */ 121 + static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle) 122 + { 123 + int retval; 124 + 125 + retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap, 126 + __KSTK_TOS(idle), 127 + (unsigned long)task_thread_info(idle), 0); 128 + if (retval != 0) 129 + printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); 130 + } 131 + 132 + /* 133 + * Use CFE to find out how many CPUs are available, setting up 134 + * phys_cpu_present_map and the logical/physical mappings. 135 + * XXXKW will the boot CPU ever not be physical 0? 136 + * 137 + * Common setup before any secondaries are started 138 + */ 139 + static void __init sb1250_smp_setup(void) 140 + { 141 + int i, num; 142 + 143 + cpus_clear(phys_cpu_present_map); 144 + cpu_set(0, phys_cpu_present_map); 145 + __cpu_number_map[0] = 0; 146 + __cpu_logical_map[0] = 0; 147 + 148 + for (i = 1, num = 0; i < NR_CPUS; i++) { 149 + if (cfe_cpu_stop(i) == 0) { 150 + cpu_set(i, phys_cpu_present_map); 151 + __cpu_number_map[i] = ++num; 152 + __cpu_logical_map[num] = i; 153 + } 154 + } 155 + printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); 156 + } 157 + 158 + static void __init sb1250_prepare_cpus(unsigned int max_cpus) 159 + { 160 + } 161 + 162 + struct plat_smp_ops sb_smp_ops = { 163 + .send_ipi_single = sb1250_send_ipi_single, 164 + .send_ipi_mask = sb1250_send_ipi_mask, 165 + .init_secondary = sb1250_init_secondary, 166 + .smp_finish = sb1250_smp_finish, 167 + .cpus_done = sb1250_cpus_done, 168 + .boot_secondary = sb1250_boot_secondary, 169 + .smp_setup = sb1250_smp_setup, 170 + .prepare_cpus = sb1250_prepare_cpus, 171 + }; 72 172 73 173 void sb1250_mailbox_interrupt(void) 74 174 {
-2
include/asm-mips/sibyte/sb1250.h
··· 48 48 extern void sb1250_time_init(void); 49 49 extern void sb1250_mask_irq(int cpu, int irq); 50 50 extern void sb1250_unmask_irq(int cpu, int irq); 51 - extern void sb1250_smp_finish(void); 52 51 53 52 extern void bcm1480_time_init(void); 54 53 extern void bcm1480_mask_irq(int cpu, int irq); 55 54 extern void bcm1480_unmask_irq(int cpu, int irq); 56 - extern void bcm1480_smp_finish(void); 57 55 58 56 #define AT_spin \ 59 57 __asm__ __volatile__ ( \
+56
include/asm-mips/smp-ops.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General 3 + * Public License. See the file "COPYING" in the main directory of this 4 + * archive for more details. 5 + * 6 + * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com) 7 + * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc. 8 + * Copyright (C) 2000, 2001, 2002 Ralf Baechle 9 + * Copyright (C) 2000, 2001 Broadcom Corporation 10 + */ 11 + #ifndef __ASM_SMP_OPS_H 12 + #define __ASM_SMP_OPS_H 13 + 14 + #ifdef CONFIG_SMP 15 + 16 + #include <linux/cpumask.h> 17 + 18 + struct plat_smp_ops { 19 + void (*send_ipi_single)(int cpu, unsigned int action); 20 + void (*send_ipi_mask)(cpumask_t mask, unsigned int action); 21 + void (*init_secondary)(void); 22 + void (*smp_finish)(void); 23 + void (*cpus_done)(void); 24 + void (*boot_secondary)(int cpu, struct task_struct *idle); 25 + void (*smp_setup)(void); 26 + void (*prepare_cpus)(unsigned int max_cpus); 27 + }; 28 + 29 + extern void register_smp_ops(struct plat_smp_ops *ops); 30 + 31 + static inline void plat_smp_setup(void) 32 + { 33 + extern struct plat_smp_ops *mp_ops; /* private */ 34 + 35 + mp_ops->smp_setup(); 36 + } 37 + 38 + #else /* !CONFIG_SMP */ 39 + 40 + struct plat_smp_ops; 41 + 42 + static inline void plat_smp_setup(void) 43 + { 44 + /* UP, nothing to do ... */ 45 + } 46 + 47 + static inline void register_smp_ops(struct plat_smp_ops *ops) 48 + { 49 + } 50 + 51 + #endif /* !CONFIG_SMP */ 52 + 53 + extern struct plat_smp_ops up_smp_ops; 54 + extern struct plat_smp_ops vsmp_smp_ops; 55 + 56 + #endif /* __ASM_SMP_OPS_H */
+5 -56
include/asm-mips/smp.h
··· 11 11 #ifndef __ASM_SMP_H 12 12 #define __ASM_SMP_H 13 13 14 - 15 - #ifdef CONFIG_SMP 16 - 17 14 #include <linux/bitops.h> 18 15 #include <linux/linkage.h> 19 16 #include <linux/threads.h> 20 17 #include <linux/cpumask.h> 18 + 21 19 #include <asm/atomic.h> 20 + #include <asm/smp-ops.h> 22 21 23 22 extern int smp_num_siblings; 24 23 extern cpumask_t cpu_sibling_map[]; ··· 51 52 extern cpumask_t phys_cpu_present_map; 52 53 #define cpu_possible_map phys_cpu_present_map 53 54 54 - /* 55 - * These are defined by the board-specific code. 56 - */ 57 - 58 - /* 59 - * Cause the function described by call_data to be executed on the passed 60 - * cpu. When the function has finished, increment the finished field of 61 - * call_data. 62 - */ 63 - extern void core_send_ipi(int cpu, unsigned int action); 64 - 65 - static inline void core_send_ipi_mask(cpumask_t mask, unsigned int action) 66 - { 67 - unsigned int i; 68 - 69 - for_each_cpu_mask(i, mask) 70 - core_send_ipi(i, action); 71 - } 72 - 73 - 74 - /* 75 - * Firmware CPU startup hook 76 - */ 77 - extern void prom_boot_secondary(int cpu, struct task_struct *idle); 78 - 79 - /* 80 - * After we've done initial boot, this function is called to allow the 81 - * board code to clean up state, if needed 82 - */ 83 - extern void prom_init_secondary(void); 84 - 85 - /* 86 - * Populate cpu_possible_map before smp_init, called from setup_arch. 87 - */ 88 - extern void plat_smp_setup(void); 89 - 90 - /* 91 - * Called in smp_prepare_cpus. 92 - */ 93 - extern void plat_prepare_cpus(unsigned int max_cpus); 94 - 95 - /* 96 - * Last chance for the board code to finish SMP initialization before 97 - * the CPU is "online". 
98 - */ 99 - extern void prom_smp_finish(void); 100 - 101 - /* Hook for after all CPUs are online */ 102 - extern void prom_cpus_done(void); 103 - 104 55 extern void asmlinkage smp_bootstrap(void); 105 56 106 57 /* ··· 60 111 */ 61 112 static inline void smp_send_reschedule(int cpu) 62 113 { 63 - core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF); 114 + extern struct plat_smp_ops *mp_ops; /* private */ 115 + 116 + mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); 64 117 } 65 118 66 119 extern asmlinkage void smp_call_function_interrupt(void); 67 - 68 - #endif /* CONFIG_SMP */ 69 120 70 121 #endif /* __ASM_SMP_H */