Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: implement CPU hotplug

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>

Authored by Max Filippov and committed by Chris Zankel.
Commit hashes: 49b424fe f615136c

+248 -4
+9
arch/xtensa/Kconfig
··· 140 140 range 2 32 141 141 default "4" 142 142 143 + config HOTPLUG_CPU 144 + bool "Enable CPU hotplug support" 145 + depends on SMP 146 + help 147 + Say Y here to allow turning CPUs off and on. CPUs can be 148 + controlled through /sys/devices/system/cpu. 149 + 150 + Say N if you want to disable CPU hotplug. 151 + 143 152 config MATH_EMULATION 144 153 bool "Math emulation" 145 154 help
+1
arch/xtensa/include/asm/irq.h
··· 45 45 struct irqaction; 46 46 struct irq_domain; 47 47 48 + void migrate_irqs(void); 48 49 int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize, 49 50 unsigned long int_irq, unsigned long ext_irq, 50 51 unsigned long *out_hwirq, unsigned int *out_type);
+9
arch/xtensa/include/asm/smp.h
··· 29 29 struct seq_file; 30 30 void show_ipi_list(struct seq_file *p, int prec); 31 31 32 + #ifdef CONFIG_HOTPLUG_CPU 33 + 34 + void __cpu_die(unsigned int cpu); 35 + int __cpu_disable(void); 36 + void cpu_die(void); 37 + void cpu_restart(void); 38 + 39 + #endif /* CONFIG_HOTPLUG_CPU */ 40 + 32 41 #endif /* CONFIG_SMP */ 33 42 34 43 #endif /* _XTENSA_SMP_H */
+50 -1
arch/xtensa/kernel/head.S
··· 103 103 104 104 ENDPROC(_start) 105 105 106 - __INIT 106 + __REF 107 107 .literal_position 108 108 109 109 ENTRY(_startup) ··· 301 301 #endif /* CONFIG_SMP */ 302 302 303 303 ENDPROC(_startup) 304 + 305 + #ifdef CONFIG_HOTPLUG_CPU 306 + 307 + ENTRY(cpu_restart) 308 + 309 + #if XCHAL_DCACHE_IS_WRITEBACK 310 + ___flush_invalidate_dcache_all a2 a3 311 + #else 312 + ___invalidate_dcache_all a2 a3 313 + #endif 314 + memw 315 + movi a2, CCON # MX External Register to Configure Cache 316 + movi a3, 0 317 + wer a3, a2 318 + extw 319 + 320 + rsr a0, prid 321 + neg a2, a0 322 + movi a3, cpu_start_id 323 + s32i a2, a3, 0 324 + #if XCHAL_DCACHE_IS_WRITEBACK 325 + dhwbi a3, 0 326 + #endif 327 + 1: 328 + l32i a2, a3, 0 329 + dhi a3, 0 330 + bne a2, a0, 1b 331 + 332 + /* 333 + * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). 334 + * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow 335 + * xt-gdb to single step via DEBUG exceptions received directly 336 + * by ocd. 337 + */ 338 + movi a1, 1 339 + movi a0, 0 340 + wsr a1, windowstart 341 + wsr a0, windowbase 342 + rsync 343 + 344 + movi a1, LOCKLEVEL 345 + wsr a1, ps 346 + rsync 347 + 348 + j _startup 349 + 350 + ENDPROC(cpu_restart) 351 + 352 + #endif /* CONFIG_HOTPLUG_CPU */ 304 353 305 354 /* 306 355 * DATA section
+49
arch/xtensa/kernel/irq.c
··· 153 153 #endif 154 154 variant_init_irq(); 155 155 } 156 + 157 + #ifdef CONFIG_HOTPLUG_CPU 158 + static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu) 159 + { 160 + struct irq_desc *desc = irq_to_desc(irq); 161 + struct irq_chip *chip = irq_data_get_irq_chip(data); 162 + unsigned long flags; 163 + 164 + raw_spin_lock_irqsave(&desc->lock, flags); 165 + if (chip->irq_set_affinity) 166 + chip->irq_set_affinity(data, cpumask_of(cpu), false); 167 + raw_spin_unlock_irqrestore(&desc->lock, flags); 168 + } 169 + 170 + /* 171 + * The CPU has been marked offline. Migrate IRQs off this CPU. If 172 + * the affinity settings do not allow other CPUs, force them onto any 173 + * available CPU. 174 + */ 175 + void migrate_irqs(void) 176 + { 177 + unsigned int i, cpu = smp_processor_id(); 178 + struct irq_desc *desc; 179 + 180 + for_each_irq_desc(i, desc) { 181 + struct irq_data *data = irq_desc_get_irq_data(desc); 182 + unsigned int newcpu; 183 + 184 + if (irqd_is_per_cpu(data)) 185 + continue; 186 + 187 + if (!cpumask_test_cpu(cpu, data->affinity)) 188 + continue; 189 + 190 + newcpu = cpumask_any_and(data->affinity, cpu_online_mask); 191 + 192 + if (newcpu >= nr_cpu_ids) { 193 + pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", 194 + i, cpu); 195 + 196 + cpumask_setall(data->affinity); 197 + newcpu = cpumask_any_and(data->affinity, 198 + cpu_online_mask); 199 + } 200 + 201 + route_irq(data, i, newcpu); 202 + } 203 + } 204 + #endif /* CONFIG_HOTPLUG_CPU */
+1
arch/xtensa/kernel/setup.c
··· 527 527 528 528 for_each_possible_cpu(i) { 529 529 struct cpu *cpu = &per_cpu(cpu_data, i); 530 + cpu->hotpluggable = !!i; 530 531 register_cpu(cpu, i); 531 532 } 532 533
+127 -1
arch/xtensa/kernel/smp.c
··· 40 40 # endif 41 41 #endif 42 42 43 + static void system_invalidate_dcache_range(unsigned long start, 44 + unsigned long size); 45 + static void system_flush_invalidate_dcache_range(unsigned long start, 46 + unsigned long size); 47 + 43 48 /* IPI (Inter Process Interrupt) */ 44 49 45 50 #define IPI_IRQ 0 ··· 111 106 static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */ 112 107 static DECLARE_COMPLETION(cpu_running); 113 108 114 - void __init secondary_start_kernel(void) 109 + void secondary_start_kernel(void) 115 110 { 116 111 struct mm_struct *mm = &init_mm; 117 112 unsigned int cpu = smp_processor_id(); ··· 179 174 __func__, cpu, run_stall_mask, get_er(MPSCORE)); 180 175 } 181 176 177 + #ifdef CONFIG_HOTPLUG_CPU 178 + unsigned long cpu_start_id __cacheline_aligned; 179 + #endif 182 180 unsigned long cpu_start_ccount; 183 181 184 182 static int boot_secondary(unsigned int cpu, struct task_struct *ts) ··· 190 182 unsigned long ccount; 191 183 int i; 192 184 185 + #ifdef CONFIG_HOTPLUG_CPU 186 + cpu_start_id = cpu; 187 + system_flush_invalidate_dcache_range( 188 + (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); 189 + #endif 193 190 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); 194 191 195 192 for (i = 0; i < 2; ++i) { ··· 246 233 247 234 return ret; 248 235 } 236 + 237 + #ifdef CONFIG_HOTPLUG_CPU 238 + 239 + /* 240 + * __cpu_disable runs on the processor to be shutdown. 241 + */ 242 + int __cpu_disable(void) 243 + { 244 + unsigned int cpu = smp_processor_id(); 245 + 246 + /* 247 + * Take this CPU offline. Once we clear this, we can't return, 248 + * and we must not schedule until we're ready to give up the cpu. 249 + */ 250 + set_cpu_online(cpu, false); 251 + 252 + /* 253 + * OK - migrate IRQs away from this CPU 254 + */ 255 + migrate_irqs(); 256 + 257 + /* 258 + * Flush user cache and TLB mappings, and then remove this CPU 259 + * from the vm mask set of all processes. 
260 + */ 261 + local_flush_cache_all(); 262 + local_flush_tlb_all(); 263 + invalidate_page_directory(); 264 + 265 + clear_tasks_mm_cpumask(cpu); 266 + 267 + return 0; 268 + } 269 + 270 + static void platform_cpu_kill(unsigned int cpu) 271 + { 272 + smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true); 273 + } 274 + 275 + /* 276 + * called on the thread which is asking for a CPU to be shutdown - 277 + * waits until shutdown has completed, or it is timed out. 278 + */ 279 + void __cpu_die(unsigned int cpu) 280 + { 281 + unsigned long timeout = jiffies + msecs_to_jiffies(1000); 282 + while (time_before(jiffies, timeout)) { 283 + system_invalidate_dcache_range((unsigned long)&cpu_start_id, 284 + sizeof(cpu_start_id)); 285 + if (cpu_start_id == -cpu) { 286 + platform_cpu_kill(cpu); 287 + return; 288 + } 289 + } 290 + pr_err("CPU%u: unable to kill\n", cpu); 291 + } 292 + 293 + void arch_cpu_idle_dead(void) 294 + { 295 + cpu_die(); 296 + } 297 + /* 298 + * Called from the idle thread for the CPU which has been shutdown. 299 + * 300 + * Note that we disable IRQs here, but do not re-enable them 301 + * before returning to the caller. This is also the behaviour 302 + * of the other hotplug-cpu capable cores, so presumably coming 303 + * out of idle fixes this. 
304 + */ 305 + void __ref cpu_die(void) 306 + { 307 + idle_task_exit(); 308 + local_irq_disable(); 309 + __asm__ __volatile__( 310 + " movi a2, cpu_restart\n" 311 + " jx a2\n"); 312 + } 313 + 314 + #endif /* CONFIG_HOTPLUG_CPU */ 249 315 250 316 enum ipi_msg_type { 251 317 IPI_RESCHEDULE = 0, ··· 554 462 .addr2 = end, 555 463 }; 556 464 on_each_cpu(ipi_flush_icache_range, &fd, 1); 465 + } 466 + 467 + /* ------------------------------------------------------------------------- */ 468 + 469 + static void ipi_invalidate_dcache_range(void *arg) 470 + { 471 + struct flush_data *fd = arg; 472 + __invalidate_dcache_range(fd->addr1, fd->addr2); 473 + } 474 + 475 + static void system_invalidate_dcache_range(unsigned long start, 476 + unsigned long size) 477 + { 478 + struct flush_data fd = { 479 + .addr1 = start, 480 + .addr2 = size, 481 + }; 482 + on_each_cpu(ipi_invalidate_dcache_range, &fd, 1); 483 + } 484 + 485 + static void ipi_flush_invalidate_dcache_range(void *arg) 486 + { 487 + struct flush_data *fd = arg; 488 + __flush_invalidate_dcache_range(fd->addr1, fd->addr2); 489 + } 490 + 491 + static void system_flush_invalidate_dcache_range(unsigned long start, 492 + unsigned long size) 493 + { 494 + struct flush_data fd = { 495 + .addr1 = start, 496 + .addr2 = size, 497 + }; 498 + on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1); 557 499 }
+2 -2
arch/xtensa/kernel/traps.c
··· 332 332 } 333 333 334 334 335 - static void __init trap_init_excsave(void) 335 + static void trap_init_excsave(void) 336 336 { 337 337 unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table); 338 338 __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1)); ··· 384 384 } 385 385 386 386 #ifdef CONFIG_SMP 387 - void __init secondary_trap_init(void) 387 + void secondary_trap_init(void) 388 388 { 389 389 trap_init_excsave(); 390 390 }