Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] smp: delay idle task creation

Delay idle task creation until a cpu gets set online instead of
creating them for all possible cpus at system startup.
For a one cpu system this should save more than 1 MB.
On my debug system with lots of debug stuff enabled this saves 2 MB.

Same as on x86.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

authored by

Heiko Carstens and committed by
Martin Schwidefsky
f230886b 09a8e7ad

+27 -16
+27 -16
arch/s390/kernel/smp.c
··· 23 23 #define KMSG_COMPONENT "cpu" 24 24 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 25 25 26 + #include <linux/workqueue.h> 26 27 #include <linux/module.h> 27 28 #include <linux/init.h> 28 29 #include <linux/mm.h> ··· 478 477 return 0; 479 478 } 480 479 481 - static void __init smp_create_idle(unsigned int cpu) 482 - { 483 - struct task_struct *p; 480 + struct create_idle { 481 + struct work_struct work; 482 + struct task_struct *idle; 483 + struct completion done; 484 + int cpu; 485 + }; 484 486 485 - /* 486 - * don't care about the psw and regs settings since we'll never 487 - * reschedule the forked task. 488 - */ 489 - p = fork_idle(cpu); 490 - if (IS_ERR(p)) 491 - panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); 492 - current_set[cpu] = p; 487 + static void __cpuinit smp_fork_idle(struct work_struct *work) 488 + { 489 + struct create_idle *c_idle; 490 + 491 + c_idle = container_of(work, struct create_idle, work); 492 + c_idle->idle = fork_idle(c_idle->cpu); 493 + complete(&c_idle->done); 493 494 } 494 495 495 496 static int __cpuinit smp_alloc_lowcore(int cpu) ··· 555 552 int __cpuinit __cpu_up(unsigned int cpu) 556 553 { 557 554 struct _lowcore *cpu_lowcore; 555 + struct create_idle c_idle; 558 556 struct task_struct *idle; 559 557 struct stack_frame *sf; 560 558 u32 lowcore; ··· 563 559 564 560 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 565 561 return -EIO; 562 + idle = current_set[cpu]; 563 + if (!idle) { 564 + c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); 565 + INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); 566 + c_idle.cpu = cpu; 567 + schedule_work(&c_idle.work); 568 + wait_for_completion(&c_idle.done); 569 + if (IS_ERR(c_idle.idle)) 570 + return PTR_ERR(c_idle.idle); 571 + idle = c_idle.idle; 572 + current_set[cpu] = c_idle.idle; 573 + } 566 574 if (smp_alloc_lowcore(cpu)) 567 575 return -ENOMEM; 568 576 do { ··· 589 573 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) 590 574 udelay(10); 591 575 592 - idle = current_set[cpu]; 593 576 cpu_lowcore = lowcore_ptr[cpu]; 594 577 cpu_lowcore->kernel_stack = (unsigned long) 595 578 task_stack_page(idle) + THREAD_SIZE; ··· 700 685 #endif 701 686 unsigned long async_stack, panic_stack; 702 687 struct _lowcore *lowcore; 703 - unsigned int cpu; 704 688 705 689 smp_detect_cpus(); 706 690 ··· 734 720 if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) 735 721 BUG(); 736 722 #endif 737 - for_each_possible_cpu(cpu) 738 - if (cpu != smp_processor_id()) 739 - smp_create_idle(cpu); 740 723 } 741 724 742 725 void __init smp_prepare_boot_cpu(void)