Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

blackfin: smp: cleanup smp code

Move the idle task pointer into the per-cpu blackfin_cpudata and add an
smp_timer_broadcast interface.
Enable SUPPLE_1_WAKEUP and add BFIN_IPI_TIMER IPI support.

Signed-off-by: Steven Miao <realmz6@gmail.com>
Signed-off-by: Bob Liu <lliubbo@gmail.com>

Bob Liu d0014be4 16df3666

+77 -16
+3
arch/blackfin/include/asm/cpu.h
··· 14 14 struct cpu cpu; 15 15 unsigned int imemctl; 16 16 unsigned int dmemctl; 17 + #ifdef CONFIG_SMP 18 + struct task_struct *idle; 19 + #endif 17 20 }; 18 21 19 22 DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
+4 -1
arch/blackfin/include/asm/smp.h
··· 37 37 #endif 38 38 39 39 void smp_icache_flush_range_others(unsigned long start, 40 - unsigned long end); 40 + unsigned long end); 41 41 #ifdef CONFIG_HOTPLUG_CPU 42 42 void coreb_die(void); 43 43 void cpu_die(void); ··· 45 45 int __cpu_disable(void); 46 46 int __cpu_die(unsigned int cpu); 47 47 #endif 48 + 49 + void smp_timer_broadcast(const struct cpumask *mask); 50 + 48 51 49 52 #endif /* !__ASM_BLACKFIN_SMP_H */
+25 -1
arch/blackfin/kernel/time-ts.c
··· 219 219 220 220 #if defined(CONFIG_TICKSOURCE_CORETMR) 221 221 /* per-cpu local core timer */ 222 - static DEFINE_PER_CPU(struct clock_event_device, coretmr_events); 222 + DEFINE_PER_CPU(struct clock_event_device, coretmr_events); 223 223 224 224 static int bfin_coretmr_set_next_event(unsigned long cycles, 225 225 struct clock_event_device *evt) ··· 281 281 #ifdef CONFIG_CORE_TIMER_IRQ_L1 282 282 __attribute__((l1_text)) 283 283 #endif 284 + 285 + static void broadcast_timer_set_mode(enum clock_event_mode mode, 286 + struct clock_event_device *evt) 287 + { 288 + } 289 + 290 + static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) 291 + { 292 + evt->name = "dummy_timer"; 293 + evt->features = CLOCK_EVT_FEAT_ONESHOT | 294 + CLOCK_EVT_FEAT_PERIODIC | 295 + CLOCK_EVT_FEAT_DUMMY; 296 + evt->rating = 400; 297 + evt->mult = 1; 298 + evt->set_mode = broadcast_timer_set_mode; 299 + 300 + clockevents_register_device(evt); 301 + } 302 + 284 303 irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id) 285 304 { 286 305 int cpu = smp_processor_id(); ··· 324 305 unsigned long clock_tick; 325 306 unsigned int cpu = smp_processor_id(); 326 307 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); 308 + 309 + #ifdef CONFIG_SMP 310 + evt->broadcast = smp_timer_broadcast; 311 + #endif 312 + 327 313 328 314 evt->name = "bfin_core_timer"; 329 315 evt->rating = 350;
+3 -1
arch/blackfin/mach-bf561/include/mach/pll.h
··· 16 16 #include <mach/irq.h> 17 17 18 18 #define SUPPLE_0_WAKEUP ((IRQ_SUPPLE_0 - (IRQ_CORETMR + 1)) % 32) 19 + #define SUPPLE_1_WAKEUP ((IRQ_SUPPLE_1 - (IRQ_CORETMR + 1)) % 32) 19 20 20 21 static inline void 21 22 bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2) ··· 43 42 static inline void 44 43 bfin_iwr_set_sup0(unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2) 45 44 { 46 - bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP), 0, iwr0, iwr1, iwr2); 45 + bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP) | 46 + IWR_ENABLE(SUPPLE_1_WAKEUP), 0, iwr0, iwr1, iwr2); 47 47 } 48 48 49 49 #endif
+1 -1
arch/blackfin/mach-bf561/smp.c
··· 84 84 85 85 if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) { 86 86 /* CoreB already running, sending ipi to wakeup it */ 87 - platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0); 87 + smp_send_reschedule(cpu); 88 88 } else { 89 89 /* Kick CoreB, which should start execution from CORE_SRAM_BASE. */ 90 90 bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
+41 -12
arch/blackfin/mach-common/smp.c
··· 14 14 #include <linux/sched.h> 15 15 #include <linux/interrupt.h> 16 16 #include <linux/cache.h> 17 + #include <linux/clockchips.h> 17 18 #include <linux/profile.h> 18 19 #include <linux/errno.h> 19 20 #include <linux/mm.h> ··· 48 47 49 48 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; 50 49 51 - #define BFIN_IPI_RESCHEDULE 0 52 - #define BFIN_IPI_CALL_FUNC 1 53 - #define BFIN_IPI_CPU_STOP 2 50 + #define BFIN_IPI_TIMER 0 51 + #define BFIN_IPI_RESCHEDULE 1 52 + #define BFIN_IPI_CALL_FUNC 2 53 + #define BFIN_IPI_CPU_STOP 3 54 54 55 55 struct blackfin_flush_data { 56 56 unsigned long start; ··· 162 160 return IRQ_HANDLED; 163 161 } 164 162 163 + DECLARE_PER_CPU(struct clock_event_device, coretmr_events); 164 + void ipi_timer(void) 165 + { 166 + int cpu = smp_processor_id(); 167 + struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); 168 + evt->event_handler(evt); 169 + } 170 + 165 171 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) 166 172 { 167 173 struct ipi_message *msg; ··· 186 176 while (msg_queue->count) { 187 177 msg = &msg_queue->ipi_message[msg_queue->head]; 188 178 switch (msg->type) { 179 + case BFIN_IPI_TIMER: 180 + ipi_timer(); 181 + break; 189 182 case BFIN_IPI_RESCHEDULE: 190 183 scheduler_ipi(); 191 184 break; ··· 310 297 { 311 298 cpumask_t callmap; 312 299 /* simply trigger an ipi */ 313 - if (cpu_is_offline(cpu)) 314 - return; 315 300 316 301 cpumask_clear(&callmap); 317 302 cpumask_set_cpu(cpu, &callmap); ··· 317 306 smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0); 318 307 319 308 return; 309 + } 310 + 311 + void smp_send_msg(const struct cpumask *mask, unsigned long type) 312 + { 313 + smp_send_message(*mask, type, NULL, NULL, 0); 314 + } 315 + 316 + void smp_timer_broadcast(const struct cpumask *mask) 317 + { 318 + smp_send_msg(mask, BFIN_IPI_TIMER); 320 319 } 321 320 322 321 void smp_send_stop(void) ··· 347 326 int __cpuinit __cpu_up(unsigned int cpu) 348 327 { 349 328 int ret; 350 - 
static struct task_struct *idle; 329 + struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu); 330 + struct task_struct *idle = ci->idle; 351 331 352 - if (idle) 332 + if (idle) { 353 333 free_task(idle); 354 - 355 - idle = fork_idle(cpu); 356 - if (IS_ERR(idle)) { 357 - printk(KERN_ERR "CPU%u: fork() failed\n", cpu); 358 - return PTR_ERR(idle); 334 + idle = NULL; 359 335 } 360 336 337 + if (!idle) { 338 + idle = fork_idle(cpu); 339 + if (IS_ERR(idle)) { 340 + printk(KERN_ERR "CPU%u: fork() failed\n", cpu); 341 + return PTR_ERR(idle); 342 + } 343 + ci->idle = idle; 344 + } else { 345 + init_idle(idle, cpu); 346 + } 361 347 secondary_stack = task_stack_page(idle) + THREAD_SIZE; 362 348 363 349 ret = platform_boot_secondary(cpu, idle); ··· 439 411 440 412 bfin_setup_caches(cpu); 441 413 414 + notify_cpu_starting(cpu); 442 415 /* 443 416 * Calibrate loops per jiffy value. 444 417 * IRQs need to be enabled here - D-cache can be invalidated