Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull smp hotplug cleanups from Thomas Gleixner:
"This series is merely a cleanup of code copied around in arch/* and
not changing any of the real cpu hotplug horrors yet. I wish I'd had
something more substantial for 3.5, but I underestimated the lurking
horror..."

Fix up trivial conflicts in arch/{arm,sparc,x86}/Kconfig and
arch/sparc/include/asm/thread_info_32.h

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits)
um: Remove leftover declaration of alloc_task_struct_node()
task_allocator: Use config switches instead of magic defines
sparc: Use common threadinfo allocator
score: Use common threadinfo allocator
sh-use-common-threadinfo-allocator
mn10300: Use common threadinfo allocator
powerpc: Use common threadinfo allocator
mips: Use common threadinfo allocator
hexagon: Use common threadinfo allocator
m32r: Use common threadinfo allocator
frv: Use common threadinfo allocator
cris: Use common threadinfo allocator
x86: Use common threadinfo allocator
c6x: Use common threadinfo allocator
fork: Provide kmemcache based thread_info allocator
tile: Use common threadinfo allocator
fork: Provide weak arch_release_[task_struct|thread_info] functions
fork: Move thread info gfp flags to header
fork: Remove the weak insanity
sh: Remove cpu_idle_wait()
...

+413 -1932
+15
arch/Kconfig
··· 145 145 config USE_GENERIC_SMP_HELPERS 146 146 bool 147 147 148 + config GENERIC_SMP_IDLE_THREAD 149 + bool 150 + 151 + # Select if arch init_task initializer is different to init/init_task.c 152 + config ARCH_INIT_TASK 153 + bool 154 + 155 + # Select if arch has its private alloc_task_struct() function 156 + config ARCH_TASK_STRUCT_ALLOCATOR 157 + bool 158 + 159 + # Select if arch has its private alloc_thread_info() function 160 + config ARCH_THREAD_INFO_ALLOCATOR 161 + bool 162 + 148 163 config HAVE_REGS_AND_STACK_ACCESS_API 149 164 bool 150 165 help
+1
arch/alpha/Kconfig
··· 15 15 select GENERIC_IRQ_SHOW 16 16 select ARCH_WANT_OPTIONAL_GPIOLIB 17 17 select ARCH_HAVE_NMI_SAFE_CMPXCHG 18 + select GENERIC_SMP_IDLE_THREAD 18 19 help 19 20 The Alpha is a 64-bit general-purpose processor designed and 20 21 marketed by the Digital Equipment Corporation of blessed memory,
+1 -1
arch/alpha/kernel/Makefile
··· 6 6 asflags-y := $(KBUILD_CFLAGS) 7 7 ccflags-y := -Wno-sign-compare 8 8 9 - obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ 9 + obj-y := entry.o traps.o process.o osf_sys.o irq.o \ 10 10 irq_alpha.o signal.o setup.o ptrace.o time.o \ 11 11 alpha_ksyms.o systbls.o err_common.o io.o 12 12
-17
arch/alpha/kernel/init_task.c
··· 1 - #include <linux/mm.h> 2 - #include <linux/module.h> 3 - #include <linux/sched.h> 4 - #include <linux/init.h> 5 - #include <linux/init_task.h> 6 - #include <linux/fs.h> 7 - #include <linux/mqueue.h> 8 - #include <asm/uaccess.h> 9 - 10 - 11 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 12 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 13 - struct task_struct init_task = INIT_TASK(init_task); 14 - EXPORT_SYMBOL(init_task); 15 - 16 - union thread_union init_thread_union __init_task_data = 17 - { INIT_THREAD_INFO(init_task) };
+3 -17
arch/alpha/kernel/smp.c
··· 357 357 * Bring one cpu online. 358 358 */ 359 359 static int __cpuinit 360 - smp_boot_one_cpu(int cpuid) 360 + smp_boot_one_cpu(int cpuid, struct task_struct *idle) 361 361 { 362 - struct task_struct *idle; 363 362 unsigned long timeout; 364 - 365 - /* Cook up an idler for this guy. Note that the address we 366 - give to kernel_thread is irrelevant -- it's going to start 367 - where HWRPB.CPU_restart says to start. But this gets all 368 - the other task-y sort of data structures set up like we 369 - wish. We can't use kernel_thread since we must avoid 370 - rescheduling the child. */ 371 - idle = fork_idle(cpuid); 372 - if (IS_ERR(idle)) 373 - panic("failed fork for CPU %d", cpuid); 374 - 375 - DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n", 376 - cpuid, idle->state, idle->flags)); 377 363 378 364 /* Signal the secondary to wait a moment. */ 379 365 smp_secondary_alive = -1; ··· 473 487 } 474 488 475 489 int __cpuinit 476 - __cpu_up(unsigned int cpu) 490 + __cpu_up(unsigned int cpu, struct task_struct *tidle) 477 491 { 478 - smp_boot_one_cpu(cpu); 492 + smp_boot_one_cpu(cpu, tidle); 479 493 480 494 return cpu_online(cpu) ? 0 : -ENOSYS; 481 495 }
+1 -3
arch/arm/Kconfig
··· 37 37 select CPU_PM if (SUSPEND || CPU_IDLE) 38 38 select GENERIC_PCI_IOMAP 39 39 select HAVE_BPF_JIT 40 + select GENERIC_SMP_IDLE_THREAD 40 41 help 41 42 The ARM series is a line of low-power-consumption RISC chip designs 42 43 licensed by ARM Ltd and targeted at embedded applications and ··· 154 153 Internal node to signify that the ARCH has CPUFREQ support 155 154 and that the relevant menu configurations are displayed for 156 155 it. 157 - 158 - config ARCH_HAS_CPU_IDLE_WAIT 159 - def_bool y 160 156 161 157 config GENERIC_HWEIGHT 162 158 bool
+1 -1
arch/arm/Makefile
··· 117 117 CHECKFLAGS += -D__arm__ 118 118 119 119 #Default value 120 - head-y := arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o 120 + head-y := arch/arm/kernel/head$(MMUEXT).o 121 121 textofs-y := 0x00008000 122 122 textofs-$(CONFIG_ARCH_CLPS711X) := 0x00028000 123 123 # We don't want the htc bootloader to corrupt kernel during resume
-1
arch/arm/include/asm/cpu.h
··· 16 16 struct cpuinfo_arm { 17 17 struct cpu cpu; 18 18 #ifdef CONFIG_SMP 19 - struct task_struct *idle; 20 19 unsigned int loops_per_jiffy; 21 20 #endif 22 21 };
-2
arch/arm/include/asm/processor.h
··· 88 88 #define cpu_relax() barrier() 89 89 #endif 90 90 91 - void cpu_idle_wait(void); 92 - 93 91 /* 94 92 * Create a new kernel thread 95 93 */
+1 -1
arch/arm/kernel/Makefile
··· 82 82 obj-$(CONFIG_DEBUG_LL) += debug.o 83 83 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 84 84 85 - extra-y := $(head-y) init_task.o vmlinux.lds 85 + extra-y := $(head-y) vmlinux.lds
-37
arch/arm/kernel/init_task.c
··· 1 - /* 2 - * linux/arch/arm/kernel/init_task.c 3 - */ 4 - #include <linux/mm.h> 5 - #include <linux/module.h> 6 - #include <linux/fs.h> 7 - #include <linux/sched.h> 8 - #include <linux/init.h> 9 - #include <linux/init_task.h> 10 - #include <linux/mqueue.h> 11 - #include <linux/uaccess.h> 12 - 13 - #include <asm/pgtable.h> 14 - 15 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 16 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 17 - /* 18 - * Initial thread structure. 19 - * 20 - * We need to make sure that this is 8192-byte aligned due to the 21 - * way process stacks are handled. This is done by making sure 22 - * the linker maps this in the .text segment right after head.S, 23 - * and making head.S ensure the proper alignment. 24 - * 25 - * The things we do for performance.. 26 - */ 27 - union thread_union init_thread_union __init_task_data = 28 - { INIT_THREAD_INFO(init_task) }; 29 - 30 - /* 31 - * Initial task structure. 32 - * 33 - * All other task structs will be allocated on slabs in fork.c 34 - */ 35 - struct task_struct init_task = INIT_TASK(init_task); 36 - 37 - EXPORT_SYMBOL(init_task);
-20
arch/arm/kernel/process.c
··· 157 157 void (*arm_pm_restart)(char str, const char *cmd) = null_restart; 158 158 EXPORT_SYMBOL_GPL(arm_pm_restart); 159 159 160 - static void do_nothing(void *unused) 161 - { 162 - } 163 - 164 - /* 165 - * cpu_idle_wait - Used to ensure that all the CPUs discard old value of 166 - * pm_idle and update to new pm_idle value. Required while changing pm_idle 167 - * handler on SMP systems. 168 - * 169 - * Caller must have changed pm_idle to the new value before the call. Old 170 - * pm_idle value will not be used by any CPU after the return of this function. 171 - */ 172 - void cpu_idle_wait(void) 173 - { 174 - smp_mb(); 175 - /* kick all the CPUs so that they exit out of pm_idle */ 176 - smp_call_function(do_nothing, NULL, 1); 177 - } 178 - EXPORT_SYMBOL_GPL(cpu_idle_wait); 179 - 180 160 /* 181 161 * This is our default idle handler. 182 162 */
+1 -25
arch/arm/kernel/smp.c
··· 60 60 61 61 static DECLARE_COMPLETION(cpu_running); 62 62 63 - int __cpuinit __cpu_up(unsigned int cpu) 63 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 64 64 { 65 - struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); 66 - struct task_struct *idle = ci->idle; 67 65 int ret; 68 - 69 - /* 70 - * Spawn a new process manually, if not already done. 71 - * Grab a pointer to its task struct so we can mess with it 72 - */ 73 - if (!idle) { 74 - idle = fork_idle(cpu); 75 - if (IS_ERR(idle)) { 76 - printk(KERN_ERR "CPU%u: fork() failed\n", cpu); 77 - return PTR_ERR(idle); 78 - } 79 - ci->idle = idle; 80 - } else { 81 - /* 82 - * Since this idle thread is being re-used, call 83 - * init_idle() to reinitialize the thread structure. 84 - */ 85 - init_idle(idle, cpu); 86 - } 87 66 88 67 /* 89 68 * We need to tell the secondary core where to find ··· 297 318 298 319 void __init smp_prepare_boot_cpu(void) 299 320 { 300 - unsigned int cpu = smp_processor_id(); 301 - 302 - per_cpu(cpu_data, cpu).idle = current; 303 321 } 304 322 305 323 void __init smp_prepare_cpus(unsigned int max_cpus)
+1 -1
arch/avr32/kernel/Makefile
··· 8 8 obj-y += syscall_table.o syscall-stubs.o irq.o 9 9 obj-y += setup.o traps.o ocd.o ptrace.o 10 10 obj-y += signal.o sys_avr32.o process.o time.o 11 - obj-y += init_task.o switch_to.o cpu.o 11 + obj-y += switch_to.o cpu.o 12 12 obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o 13 13 obj-$(CONFIG_KPROBES) += kprobes.o 14 14 obj-$(CONFIG_STACKTRACE) += stacktrace.o
-31
arch/avr32/kernel/init_task.c
··· 1 - /* 2 - * Copyright (C) 2004-2006 Atmel Corporation 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License version 2 as 6 - * published by the Free Software Foundation. 7 - */ 8 - #include <linux/module.h> 9 - #include <linux/fs.h> 10 - #include <linux/sched.h> 11 - #include <linux/init_task.h> 12 - #include <linux/mqueue.h> 13 - 14 - #include <asm/pgtable.h> 15 - 16 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 17 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 18 - /* 19 - * Initial thread structure. Must be aligned on an 8192-byte boundary. 20 - */ 21 - union thread_union init_thread_union __init_task_data = 22 - { INIT_THREAD_INFO(init_task) }; 23 - 24 - /* 25 - * Initial task structure. 26 - * 27 - * All other task structs will be allocated on slabs in fork.c 28 - */ 29 - struct task_struct init_task = INIT_TASK(init_task); 30 - 31 - EXPORT_SYMBOL(init_task);
+1
arch/blackfin/Kconfig
··· 37 37 select GENERIC_IRQ_PROBE 38 38 select IRQ_PER_CPU if SMP 39 39 select HAVE_NMI_WATCHDOG if NMI_WATCHDOG 40 + select GENERIC_SMP_IDLE_THREAD 40 41 41 42 config GENERIC_CSUM 42 43 def_bool y
-2
arch/blackfin/Makefile
··· 109 109 CHECKFLAGS_SILICON = $(shell echo "" | $(CPP) $(KBUILD_CFLAGS) -dD - 2>/dev/null | awk '$$2 == "__SILICON_REVISION__" { print $$3 }') 110 110 CHECKFLAGS += -D__SILICON_REVISION__=$(CHECKFLAGS_SILICON) -D__bfin__ 111 111 112 - head-y := arch/$(ARCH)/kernel/init_task.o 113 - 114 112 core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/ arch/$(ARCH)/mach-common/ 115 113 116 114 # If we have a machine-specific directory, then include it in the build.
+1 -1
arch/blackfin/kernel/Makefile
··· 2 2 # arch/blackfin/kernel/Makefile 3 3 # 4 4 5 - extra-y := init_task.o vmlinux.lds 5 + extra-y := vmlinux.lds 6 6 7 7 obj-y := \ 8 8 entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
-32
arch/blackfin/kernel/init_task.c
··· 1 - /* 2 - * Copyright 2004-2009 Analog Devices Inc. 3 - * 4 - * Licensed under the GPL-2 or later 5 - */ 6 - 7 - #include <linux/mm.h> 8 - #include <linux/module.h> 9 - #include <linux/init_task.h> 10 - #include <linux/mqueue.h> 11 - #include <linux/fs.h> 12 - 13 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 14 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 15 - /* 16 - * Initial task structure. 17 - * 18 - * All other task structs will be allocated on slabs in fork.c 19 - */ 20 - struct task_struct init_task = INIT_TASK(init_task); 21 - EXPORT_SYMBOL(init_task); 22 - 23 - /* 24 - * Initial thread structure. 25 - * 26 - * We need to make sure that this is 8192-byte aligned due to the 27 - * way process stacks are handled. This is done by having a special 28 - * "init_task" linker map entry. 29 - */ 30 - union thread_union init_thread_union 31 - __init_task_data = { 32 - INIT_THREAD_INFO(init_task)};
+1 -18
arch/blackfin/mach-common/smp.c
··· 340 340 return; 341 341 } 342 342 343 - int __cpuinit __cpu_up(unsigned int cpu) 343 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 344 344 { 345 345 int ret; 346 - struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu); 347 - struct task_struct *idle = ci->idle; 348 346 349 - if (idle) { 350 - free_task(idle); 351 - idle = NULL; 352 - } 353 - 354 - if (!idle) { 355 - idle = fork_idle(cpu); 356 - if (IS_ERR(idle)) { 357 - printk(KERN_ERR "CPU%u: fork() failed\n", cpu); 358 - return PTR_ERR(idle); 359 - } 360 - ci->idle = idle; 361 - } else { 362 - init_idle(idle, cpu); 363 - } 364 347 secondary_stack = task_stack_page(idle) + THREAD_SIZE; 365 348 366 349 ret = platform_boot_secondary(cpu, idle);
+2 -15
arch/c6x/include/asm/thread_info.h
··· 20 20 #ifdef CONFIG_4KSTACKS 21 21 #define THREAD_SIZE 4096 22 22 #define THREAD_SHIFT 12 23 - #define THREAD_ORDER 0 23 + #define THREAD_SIZE_ORDER 0 24 24 #else 25 25 #define THREAD_SIZE 8192 26 26 #define THREAD_SHIFT 13 27 - #define THREAD_ORDER 1 27 + #define THREAD_SIZE_ORDER 1 28 28 #endif 29 29 30 30 #define THREAD_START_SP (THREAD_SIZE - 8) ··· 80 80 return ti; 81 81 } 82 82 83 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 84 - 85 - /* thread information allocation */ 86 - #ifdef CONFIG_DEBUG_STACK_USAGE 87 - #define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) 88 - #else 89 - #define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK) 90 - #endif 91 - 92 - #define alloc_thread_info_node(tsk, node) \ 93 - ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) 94 - 95 - #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) 96 83 #define get_thread_info(ti) get_task_struct((ti)->task) 97 84 #define put_thread_info(ti) put_task_struct((ti)->task) 98 85 #endif /* __ASSEMBLY__ */
-16
arch/c6x/kernel/process.c
··· 26 26 27 27 extern asmlinkage void ret_from_fork(void); 28 28 29 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 30 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 31 - 32 - /* 33 - * Initial thread structure. 34 - */ 35 - union thread_union init_thread_union __init_task_data = { 36 - INIT_THREAD_INFO(init_task) 37 - }; 38 - 39 - /* 40 - * Initial task structure. 41 - */ 42 - struct task_struct init_task = INIT_TASK(init_task); 43 - EXPORT_SYMBOL(init_task); 44 - 45 29 /* 46 30 * power off function, if any 47 31 */
+1
arch/cris/Kconfig
··· 49 49 select HAVE_GENERIC_HARDIRQS 50 50 select GENERIC_IRQ_SHOW 51 51 select GENERIC_IOMAP 52 + select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32 52 53 53 54 config HZ 54 55 int
+3 -11
arch/cris/arch-v32/kernel/smp.c
··· 108 108 109 109 /* Bring one cpu online.*/ 110 110 static int __init 111 - smp_boot_one_cpu(int cpuid) 111 + smp_boot_one_cpu(int cpuid, struct task_struct *idle) 112 112 { 113 113 unsigned timeout; 114 - struct task_struct *idle; 115 114 cpumask_t cpu_mask; 116 115 117 116 cpumask_clear(&cpu_mask); 118 - idle = fork_idle(cpuid); 119 - if (IS_ERR(idle)) 120 - panic("SMP: fork failed for CPU:%d", cpuid); 121 - 122 117 task_thread_info(idle)->cpu = cpuid; 123 118 124 119 /* Information to the CPU that is about to boot */ ··· 136 141 udelay(100); 137 142 barrier(); 138 143 } 139 - 140 - put_task_struct(idle); 141 - idle = NULL; 142 144 143 145 printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid); 144 146 return -1; ··· 199 207 */ 200 208 unsigned long cache_decay_ticks = 1; 201 209 202 - int __cpuinit __cpu_up(unsigned int cpu) 210 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 203 211 { 204 - smp_boot_one_cpu(cpu); 212 + smp_boot_one_cpu(cpu, tidle); 205 213 return cpu_online(cpu) ? 0 : -ENOSYS; 206 214 } 207 215
+2 -3
arch/cris/include/asm/processor.h
··· 25 25 */ 26 26 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) 27 27 28 - /* THREAD_SIZE is the size of the task_struct/kernel_stack combo. 28 + /* THREAD_SIZE is the size of the thread_info/kernel_stack combo. 29 29 * normally, the stack is found by doing something like p + THREAD_SIZE 30 30 * in CRIS, a page is 8192 bytes, which seems like a sane size 31 31 */ 32 - 33 32 #define THREAD_SIZE PAGE_SIZE 34 - #define KERNEL_STACK_SIZE PAGE_SIZE 33 + #define THREAD_SIZE_ORDER (0) 35 34 36 35 /* 37 36 * At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack.
-6
arch/cris/include/asm/thread_info.h
··· 65 65 66 66 #define init_thread_info (init_thread_union.thread_info) 67 67 68 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 69 - /* thread information allocation */ 70 - #define alloc_thread_info_node(tsk, node) \ 71 - ((struct thread_info *) __get_free_pages(GFP_KERNEL, 1)) 72 - #define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 73 - 74 68 #endif /* !__ASSEMBLY__ */ 75 69 76 70 /*
-28
arch/cris/kernel/process.c
··· 29 29 //#define DEBUG 30 30 31 31 /* 32 - * Initial task structure. Make this a per-architecture thing, 33 - * because different architectures tend to have different 34 - * alignment requirements and potentially different initial 35 - * setup. 36 - */ 37 - 38 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 39 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 40 - /* 41 - * Initial thread structure. 42 - * 43 - * We need to make sure that this is 8192-byte aligned due to the 44 - * way process stacks are handled. This is done by having a special 45 - * "init_task" linker map entry.. 46 - */ 47 - union thread_union init_thread_union __init_task_data = 48 - { INIT_THREAD_INFO(init_task) }; 49 - 50 - /* 51 - * Initial task structure. 52 - * 53 - * All other task structs will be allocated on slabs in fork.c 54 - */ 55 - struct task_struct init_task = INIT_TASK(init_task); 56 - 57 - EXPORT_SYMBOL(init_task); 58 - 59 - /* 60 32 * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if 61 33 * there would ever be a halt sequence (for power save when idle) with 62 34 * some largish delay when halting or resuming *and* a driver that can't
+1 -1
arch/frv/Makefile
··· 81 81 KBUILD_AFLAGS += -Wa,--gdwarf2 82 82 endif 83 83 84 - head-y := arch/frv/kernel/head.o arch/frv/kernel/init_task.o 84 + head-y := arch/frv/kernel/head.o 85 85 86 86 core-y += arch/frv/kernel/ arch/frv/mm/ 87 87 libs-y += arch/frv/lib/
-15
arch/frv/include/asm/thread_info.h
··· 21 21 22 22 #define THREAD_SIZE 8192 23 23 24 - #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 25 - 26 24 /* 27 25 * low level task data that entry.S needs immediate access to 28 26 * - this struct should fit entirely inside of one cache line ··· 79 81 register struct thread_info *__current_thread_info asm("gr15"); 80 82 81 83 #define current_thread_info() ({ __current_thread_info; }) 82 - 83 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 84 - 85 - /* thread information allocation */ 86 - #ifdef CONFIG_DEBUG_STACK_USAGE 87 - #define alloc_thread_info_node(tsk, node) \ 88 - kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 89 - #else 90 - #define alloc_thread_info_node(tsk, node) \ 91 - kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 92 - #endif 93 - 94 - #define free_thread_info(info) kfree(info) 95 84 96 85 #endif /* __ASSEMBLY__ */ 97 86
+1 -1
arch/frv/kernel/Makefile
··· 5 5 heads-y := head-uc-fr401.o head-uc-fr451.o head-uc-fr555.o 6 6 heads-$(CONFIG_MMU) := head-mmu-fr451.o 7 7 8 - extra-y:= head.o init_task.o vmlinux.lds 8 + extra-y:= head.o vmlinux.lds 9 9 10 10 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \ 11 11 kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
-32
arch/frv/kernel/init_task.c
··· 1 - #include <linux/mm.h> 2 - #include <linux/module.h> 3 - #include <linux/sched.h> 4 - #include <linux/init.h> 5 - #include <linux/init_task.h> 6 - #include <linux/fs.h> 7 - #include <linux/mqueue.h> 8 - 9 - #include <asm/uaccess.h> 10 - #include <asm/pgtable.h> 11 - 12 - 13 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 14 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 15 - /* 16 - * Initial thread structure. 17 - * 18 - * We need to make sure that this is THREAD_SIZE aligned due to the 19 - * way process stacks are handled. This is done by having a special 20 - * "init_task" linker map entry.. 21 - */ 22 - union thread_union init_thread_union __init_task_data = 23 - { INIT_THREAD_INFO(init_task) }; 24 - 25 - /* 26 - * Initial task structure. 27 - * 28 - * All other task structs will be allocated on slabs in fork.c 29 - */ 30 - struct task_struct init_task = INIT_TASK(init_task); 31 - 32 - EXPORT_SYMBOL(init_task);
-15
arch/frv/kernel/process.c
··· 43 43 void (*pm_power_off)(void); 44 44 EXPORT_SYMBOL(pm_power_off); 45 45 46 - struct task_struct *alloc_task_struct_node(int node) 47 - { 48 - struct task_struct *p = kmalloc_node(THREAD_SIZE, GFP_KERNEL, node); 49 - 50 - if (p) 51 - atomic_set((atomic_t *)(p+1), 1); 52 - return p; 53 - } 54 - 55 - void free_task_struct(struct task_struct *p) 56 - { 57 - if (atomic_dec_and_test((atomic_t *)(p+1))) 58 - kfree(p); 59 - } 60 - 61 46 static void core_sleep_idle(void) 62 47 { 63 48 #ifdef LED_DEBUG_SLEEP
+1 -1
arch/h8300/kernel/Makefile
··· 6 6 7 7 obj-y := process.o traps.o ptrace.o irq.o \ 8 8 sys_h8300.o time.o signal.o \ 9 - setup.o gpio.o init_task.o syscalls.o \ 9 + setup.o gpio.o syscalls.o \ 10 10 entry.o timer/ 11 11 12 12 obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o
-36
arch/h8300/kernel/init_task.c
··· 1 - /* 2 - * linux/arch/h8300/kernel/init_task.c 3 - */ 4 - #include <linux/mm.h> 5 - #include <linux/module.h> 6 - #include <linux/sched.h> 7 - #include <linux/init.h> 8 - #include <linux/init_task.h> 9 - #include <linux/fs.h> 10 - #include <linux/mqueue.h> 11 - 12 - #include <asm/uaccess.h> 13 - #include <asm/pgtable.h> 14 - 15 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 16 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 17 - /* 18 - * Initial task structure. 19 - * 20 - * All other task structs will be allocated on slabs in fork.c 21 - */ 22 - __asm__(".align 4"); 23 - struct task_struct init_task = INIT_TASK(init_task); 24 - 25 - EXPORT_SYMBOL(init_task); 26 - 27 - /* 28 - * Initial thread structure. 29 - * 30 - * We need to make sure that this is 8192-byte aligned due to the 31 - * way process stacks are handled. This is done by having a special 32 - * "init_task" linker map entry.. 33 - */ 34 - union thread_union init_thread_union __init_task_data = 35 - { INIT_THREAD_INFO(init_task) }; 36 -
+1
arch/hexagon/Kconfig
··· 27 27 select HAVE_ARCH_TRACEHOOK 28 28 select NO_IOPORT 29 29 select GENERIC_IOMAP 30 + select GENERIC_SMP_IDLE_THREAD 30 31 # mostly generic routines, with some accelerated ones 31 32 ---help--- 32 33 Qualcomm Hexagon is a processor architecture designed for high
+1 -2
arch/hexagon/Makefile
··· 45 45 LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 46 46 libs-y += $(LIBGCC) 47 47 48 - head-y := arch/hexagon/kernel/head.o \ 49 - arch/hexagon/kernel/init_task.o 48 + head-y := arch/hexagon/kernel/head.o 50 49 51 50 core-y += arch/hexagon/kernel/ \ 52 51 arch/hexagon/mm/ \
-8
arch/hexagon/include/asm/thread_info.h
··· 31 31 32 32 #define THREAD_SHIFT 12 33 33 #define THREAD_SIZE (1<<THREAD_SHIFT) 34 - 35 - #if THREAD_SHIFT >= PAGE_SHIFT 36 34 #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) 37 - #else /* don't use standard allocator */ 38 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 39 - extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node); 40 - extern void free_thread_info(struct thread_info *ti); 41 - #endif 42 - 43 35 44 36 #ifndef __ASSEMBLY__ 45 37
+1 -1
arch/hexagon/kernel/Makefile
··· 1 - extra-y := head.o vmlinux.lds init_task.o 1 + extra-y := head.o vmlinux.lds 2 2 3 3 obj-$(CONFIG_SMP) += smp.o topology.o 4 4
-54
arch/hexagon/kernel/init_task.c
··· 1 - /* 2 - * Init task definition 3 - * 4 - * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 and 8 - * only version 2 as published by the Free Software Foundation. 9 - * 10 - * This program is distributed in the hope that it will be useful, 11 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - * 15 - * You should have received a copy of the GNU General Public License 16 - * along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 18 - * 02110-1301, USA. 19 - */ 20 - 21 - #include <linux/mm.h> 22 - #include <linux/module.h> 23 - #include <linux/sched.h> 24 - #include <linux/init_task.h> 25 - #include <linux/fs.h> 26 - #include <linux/mqueue.h> 27 - #include <asm/thread_info.h> 28 - #include <asm/uaccess.h> 29 - #include <asm/pgtable.h> 30 - 31 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 32 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 33 - 34 - /* 35 - * Initial thread structure. 36 - * 37 - * We need to make sure that this is 8192-byte aligned due to the 38 - * way process stacks are handled. This is done by making sure 39 - * the linker maps this in the .text segment right after head.S, 40 - * and making head.S ensure the proper alignment. 41 - */ 42 - union thread_union init_thread_union 43 - __attribute__((__section__(".data.init_task"), 44 - __aligned__(THREAD_SIZE))) = { 45 - INIT_THREAD_INFO(init_task) 46 - }; 47 - 48 - /* 49 - * Initial task structure. 50 - * 51 - * All other task structs will be allocated on slabs in fork.c 52 - */ 53 - struct task_struct init_task = INIT_TASK(init_task); 54 - EXPORT_SYMBOL(init_task);
-37
arch/hexagon/kernel/process.c
··· 234 234 } 235 235 236 236 /* 237 - * Borrowed from PowerPC -- basically allow smaller kernel stacks if we 238 - * go crazy with the page sizes. 239 - */ 240 - #if THREAD_SHIFT < PAGE_SHIFT 241 - 242 - static struct kmem_cache *thread_info_cache; 243 - 244 - struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) 245 - { 246 - struct thread_info *ti; 247 - 248 - ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node); 249 - if (unlikely(ti == NULL)) 250 - return NULL; 251 - #ifdef CONFIG_DEBUG_STACK_USAGE 252 - memset(ti, 0, THREAD_SIZE); 253 - #endif 254 - return ti; 255 - } 256 - 257 - void free_thread_info(struct thread_info *ti) 258 - { 259 - kmem_cache_free(thread_info_cache, ti); 260 - } 261 - 262 - /* Weak symbol; called by init/main.c */ 263 - 264 - void thread_info_cache_init(void) 265 - { 266 - thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, 267 - THREAD_SIZE, 0, NULL); 268 - BUG_ON(thread_info_cache == NULL); 269 - } 270 - 271 - #endif /* THREAD_SHIFT < PAGE_SHIFT */ 272 - 273 - /* 274 237 * Required placeholder. 275 238 */ 276 239 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+2 -9
arch/hexagon/kernel/smp.c
··· 196 196 * maintains control until "cpu_online(cpu)" is set. 197 197 */ 198 198 199 - int __cpuinit __cpu_up(unsigned int cpu) 199 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 200 200 { 201 - struct task_struct *idle; 202 - struct thread_info *thread; 201 + struct thread_info *thread = (struct thread_info *)idle->stack; 203 202 void *stack_start; 204 203 205 - /* Create new init task for the CPU */ 206 - idle = fork_idle(cpu); 207 - if (IS_ERR(idle)) 208 - panic(KERN_ERR "fork_idle failed\n"); 209 - 210 - thread = (struct thread_info *)idle->stack; 211 204 thread->cpu = cpu; 212 205 213 206 /* Boot to the head. */
+4
arch/ia64/Kconfig
··· 33 33 select ARCH_WANT_OPTIONAL_GPIOLIB 34 34 select ARCH_HAVE_NMI_SAFE_CMPXCHG 35 35 select GENERIC_IOMAP 36 + select GENERIC_SMP_IDLE_THREAD 37 + select ARCH_INIT_TASK 38 + select ARCH_TASK_STRUCT_ALLOCATOR 39 + select ARCH_THREAD_INFO_ALLOCATOR 36 40 default y 37 41 help 38 42 The Itanium Processor Family is Intel's 64-bit successor to
-1
arch/ia64/include/asm/processor.h
··· 723 723 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT, 724 724 IDLE_NOMWAIT, IDLE_POLL}; 725 725 726 - void cpu_idle_wait(void); 727 726 void default_idle(void); 728 727 729 728 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
-3
arch/ia64/include/asm/thread_info.h
··· 54 54 }, \ 55 55 } 56 56 57 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 58 - 59 57 #ifndef ASM_OFFSETS_C 60 58 /* how to get the thread information struct from C */ 61 59 #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) ··· 82 84 #endif 83 85 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) 84 86 85 - #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 86 87 #define alloc_task_struct_node(node) \ 87 88 ({ \ 88 89 struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \
-20
arch/ia64/kernel/process.c
··· 273 273 } 274 274 #endif /* CONFIG_HOTPLUG_CPU */ 275 275 276 - static void do_nothing(void *unused) 277 - { 278 - } 279 - 280 - /* 281 - * cpu_idle_wait - Used to ensure that all the CPUs discard old value of 282 - * pm_idle and update to new pm_idle value. Required while changing pm_idle 283 - * handler on SMP systems. 284 - * 285 - * Caller must have changed pm_idle to the new value before the call. Old 286 - * pm_idle value will not be used by any CPU after the return of this function. 287 - */ 288 - void cpu_idle_wait(void) 289 - { 290 - smp_mb(); 291 - /* kick all the CPUs so that they exit out of pm_idle */ 292 - smp_call_function(do_nothing, NULL, 1); 293 - } 294 - EXPORT_SYMBOL_GPL(cpu_idle_wait); 295 - 296 276 void __attribute__((noreturn)) 297 277 cpu_idle (void) 298 278 {
+4 -59
arch/ia64/kernel/smpboot.c
··· 75 75 #endif 76 76 77 77 /* 78 - * Store all idle threads, this can be reused instead of creating 79 - * a new thread. Also avoids complicated thread destroy functionality 80 - * for idle threads. 81 - */ 82 - struct task_struct *idle_thread_array[NR_CPUS]; 83 - 84 - /* 85 78 * Global array allocated for NR_CPUS at boot time 86 79 */ 87 80 struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS]; ··· 87 94 88 95 #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]); 89 96 90 - #define get_idle_for_cpu(x) (idle_thread_array[(x)]) 91 - #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) 92 - 93 97 #else 94 - 95 - #define get_idle_for_cpu(x) (NULL) 96 - #define set_idle_for_cpu(x,p) 97 98 #define set_brendez_area(x) 98 99 #endif 99 100 ··· 467 480 return NULL; 468 481 } 469 482 470 - struct create_idle { 471 - struct work_struct work; 472 - struct task_struct *idle; 473 - struct completion done; 474 - int cpu; 475 - }; 476 - 477 - void __cpuinit 478 - do_fork_idle(struct work_struct *work) 479 - { 480 - struct create_idle *c_idle = 481 - container_of(work, struct create_idle, work); 482 - 483 - c_idle->idle = fork_idle(c_idle->cpu); 484 - complete(&c_idle->done); 485 - } 486 - 487 483 static int __cpuinit 488 - do_boot_cpu (int sapicid, int cpu) 484 + do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) 489 485 { 490 486 int timeout; 491 - struct create_idle c_idle = { 492 - .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), 493 - .cpu = cpu, 494 - .done = COMPLETION_INITIALIZER(c_idle.done), 495 - }; 496 487 497 - /* 498 - * We can't use kernel_thread since we must avoid to 499 - * reschedule the child. 
500 - */ 501 - c_idle.idle = get_idle_for_cpu(cpu); 502 - if (c_idle.idle) { 503 - init_idle(c_idle.idle, cpu); 504 - goto do_rest; 505 - } 506 - 507 - schedule_work(&c_idle.work); 508 - wait_for_completion(&c_idle.done); 509 - 510 - if (IS_ERR(c_idle.idle)) 511 - panic("failed fork for CPU %d", cpu); 512 - 513 - set_idle_for_cpu(cpu, c_idle.idle); 514 - 515 - do_rest: 516 - task_for_booting_cpu = c_idle.idle; 517 - 488 + task_for_booting_cpu = idle; 518 489 Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); 519 490 520 491 set_brendez_area(cpu); ··· 738 793 } 739 794 740 795 int __cpuinit 741 - __cpu_up (unsigned int cpu) 796 + __cpu_up(unsigned int cpu, struct task_struct *tidle) 742 797 { 743 798 int ret; 744 799 int sapicid; ··· 756 811 757 812 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; 758 813 /* Processor goes to start_secondary(), sets online flag */ 759 - ret = do_boot_cpu(sapicid, cpu); 814 + ret = do_boot_cpu(sapicid, cpu, tidle); 760 815 if (ret < 0) 761 816 return ret; 762 817
+1 -1
arch/m32r/Makefile
··· 31 31 32 32 CHECKFLAGS += -D__m32r__ -D__BIG_ENDIAN__=1 33 33 34 - head-y := arch/m32r/kernel/head.o arch/m32r/kernel/init_task.o 34 + head-y := arch/m32r/kernel/head.o 35 35 36 36 LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 37 37
+2 -15
arch/m32r/include/asm/thread_info.h
··· 55 55 56 56 #define PREEMPT_ACTIVE 0x10000000 57 57 58 - #define THREAD_SIZE (PAGE_SIZE << 1) 59 - 58 + #define THREAD_SIZE (PAGE_SIZE << 1) 59 + #define THREAD_SIZE_ORDER 1 60 60 /* 61 61 * macros/functions for gaining access to the thread information structure 62 62 */ ··· 91 91 92 92 return ti; 93 93 } 94 - 95 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 96 - 97 - /* thread information allocation */ 98 - #ifdef CONFIG_DEBUG_STACK_USAGE 99 - #define alloc_thread_info_node(tsk, node) \ 100 - kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 101 - #else 102 - #define alloc_thread_info_node(tsk, node) \ 103 - kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 104 - #endif 105 - 106 - #define free_thread_info(info) kfree(info) 107 94 108 95 #define TI_FLAG_FAULT_CODE_SHIFT 28 109 96
+1 -1
arch/m32r/kernel/Makefile
··· 2 2 # Makefile for the Linux/M32R kernel. 3 3 # 4 4 5 - extra-y := head.o init_task.o vmlinux.lds 5 + extra-y := head.o vmlinux.lds 6 6 7 7 obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \ 8 8 m32r_ksyms.o sys_m32r.o signal.o ptrace.o
-34
arch/m32r/kernel/init_task.c
··· 1 - /* orig : i386 init_task.c */ 2 - 3 - #include <linux/mm.h> 4 - #include <linux/module.h> 5 - #include <linux/sched.h> 6 - #include <linux/init.h> 7 - #include <linux/init_task.h> 8 - #include <linux/fs.h> 9 - #include <linux/mqueue.h> 10 - 11 - #include <asm/uaccess.h> 12 - #include <asm/pgtable.h> 13 - 14 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 15 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 16 - /* 17 - * Initial thread structure. 18 - * 19 - * We need to make sure that this is 8192-byte aligned due to the 20 - * way process stacks are handled. This is done by having a special 21 - * "init_task" linker map entry.. 22 - */ 23 - union thread_union init_thread_union __init_task_data = 24 - { INIT_THREAD_INFO(init_task) }; 25 - 26 - /* 27 - * Initial task structure. 28 - * 29 - * All other task structs will be allocated on slabs in fork.c 30 - */ 31 - struct task_struct init_task = INIT_TASK(init_task); 32 - 33 - EXPORT_SYMBOL(init_task); 34 -
+1 -5
arch/m32r/kernel/smpboot.c
··· 109 109 /* Function Prototypes */ 110 110 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 111 111 112 - void smp_prepare_boot_cpu(void); 113 - void smp_prepare_cpus(unsigned int); 114 112 static void init_ipi_lock(void); 115 113 static void do_boot_cpu(int); 116 - int __cpu_up(unsigned int); 117 - void smp_cpus_done(unsigned int); 118 114 119 115 int start_secondary(void *); 120 116 static void smp_callin(void); ··· 343 347 } 344 348 } 345 349 346 - int __cpuinit __cpu_up(unsigned int cpu_id) 350 + int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle) 347 351 { 348 352 int timeout; 349 353
+1 -1
arch/m68k/kernel/Makefile
··· 13 13 extra-$(CONFIG_SUN3) := sun3-head.o 14 14 extra-y += vmlinux.lds 15 15 16 - obj-y := entry.o init_task.o irq.o m68k_ksyms.o module.o process.o ptrace.o 16 + obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o 17 17 obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o 18 18 19 19 obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
-35
arch/m68k/kernel/init_task.c
··· 1 - /* 2 - * linux/arch/m68knommu/kernel/init_task.c 3 - */ 4 - #include <linux/mm.h> 5 - #include <linux/module.h> 6 - #include <linux/sched.h> 7 - #include <linux/init.h> 8 - #include <linux/init_task.h> 9 - #include <linux/fs.h> 10 - #include <linux/mqueue.h> 11 - 12 - #include <asm/uaccess.h> 13 - #include <asm/pgtable.h> 14 - 15 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 16 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 17 - /* 18 - * Initial task structure. 19 - * 20 - * All other task structs will be allocated on slabs in fork.c 21 - */ 22 - struct task_struct init_task = INIT_TASK(init_task); 23 - 24 - EXPORT_SYMBOL(init_task); 25 - 26 - /* 27 - * Initial thread structure. 28 - * 29 - * We need to make sure that this is THREAD size aligned due to the 30 - * way process stacks are handled. This is done by having a special 31 - * "init_task" linker map entry.. 32 - */ 33 - union thread_union init_thread_union __init_task_data = 34 - { INIT_THREAD_INFO(init_task) }; 35 -
+1 -1
arch/microblaze/kernel/Makefile
··· 16 16 extra-y := head.o vmlinux.lds 17 17 18 18 obj-y += dma.o exceptions.o \ 19 - hw_exception_handler.o init_task.o intc.o irq.o \ 19 + hw_exception_handler.o intc.o irq.o \ 20 20 process.o prom.o prom_parse.o ptrace.o \ 21 21 reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o 22 22
-26
arch/microblaze/kernel/init_task.c
··· 1 - /* 2 - * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> 3 - * Copyright (C) 2009 PetaLogix 4 - * Copyright (C) 2006 Atmark Techno, Inc. 5 - * 6 - * This file is subject to the terms and conditions of the GNU General Public 7 - * License. See the file "COPYING" in the main directory of this archive 8 - * for more details. 9 - */ 10 - 11 - #include <linux/module.h> 12 - #include <linux/sched.h> 13 - #include <linux/init_task.h> 14 - #include <linux/fs.h> 15 - #include <linux/mqueue.h> 16 - 17 - #include <asm/pgtable.h> 18 - 19 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 20 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 21 - 22 - union thread_union init_thread_union __init_task_data = 23 - { INIT_THREAD_INFO(init_task) }; 24 - 25 - struct task_struct init_task = INIT_TASK(init_task); 26 - EXPORT_SYMBOL(init_task);
+1
arch/mips/Kconfig
··· 29 29 select HAVE_MEMBLOCK 30 30 select HAVE_MEMBLOCK_NODE_MAP 31 31 select ARCH_DISCARD_MEMBLOCK 32 + select GENERIC_SMP_IDLE_THREAD 32 33 33 34 menu "Machine selection" 34 35
+1 -1
arch/mips/Makefile
··· 235 235 236 236 OBJCOPYFLAGS += --remove-section=.reginfo 237 237 238 - head-y := arch/mips/kernel/head.o arch/mips/kernel/init_task.o 238 + head-y := arch/mips/kernel/head.o 239 239 240 240 libs-y += arch/mips/lib/ 241 241
-12
arch/mips/include/asm/thread_info.h
··· 85 85 86 86 #define STACK_WARN (THREAD_SIZE / 8) 87 87 88 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 89 - 90 - #ifdef CONFIG_DEBUG_STACK_USAGE 91 - #define alloc_thread_info_node(tsk, node) \ 92 - kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 93 - #else 94 - #define alloc_thread_info_node(tsk, node) \ 95 - kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 96 - #endif 97 - 98 - #define free_thread_info(info) kfree(info) 99 - 100 88 #endif /* !__ASSEMBLY__ */ 101 89 102 90 #define PREEMPT_ACTIVE 0x10000000
+1 -1
arch/mips/kernel/Makefile
··· 2 2 # Makefile for the Linux/MIPS kernel. 3 3 # 4 4 5 - extra-y := head.o init_task.o vmlinux.lds 5 + extra-y := head.o vmlinux.lds 6 6 7 7 obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ 8 8 ptrace.o reset.o setup.o signal.o syscall.o \
-35
arch/mips/kernel/init_task.c
··· 1 - #include <linux/mm.h> 2 - #include <linux/export.h> 3 - #include <linux/sched.h> 4 - #include <linux/init_task.h> 5 - #include <linux/fs.h> 6 - #include <linux/mqueue.h> 7 - 8 - #include <asm/thread_info.h> 9 - #include <asm/uaccess.h> 10 - #include <asm/pgtable.h> 11 - 12 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 13 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 14 - /* 15 - * Initial thread structure. 16 - * 17 - * We need to make sure that this is 8192-byte aligned due to the 18 - * way process stacks are handled. This is done by making sure 19 - * the linker maps this in the .text segment right after head.S, 20 - * and making head.S ensure the proper alignment. 21 - * 22 - * The things we do for performance.. 23 - */ 24 - union thread_union init_thread_union __init_task_data 25 - __attribute__((__aligned__(THREAD_SIZE))) = 26 - { INIT_THREAD_INFO(init_task) }; 27 - 28 - /* 29 - * Initial task structure. 30 - * 31 - * All other task structs will be allocated on slabs in fork.c 32 - */ 33 - struct task_struct init_task = INIT_TASK(init_task); 34 - 35 - EXPORT_SYMBOL(init_task);
+2 -54
arch/mips/kernel/smp.c
··· 186 186 cpu_set(0, cpu_callin_map); 187 187 } 188 188 189 - /* 190 - * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu 191 - * and keep control until "cpu_online(cpu)" is set. Note: cpu is 192 - * physical, not logical. 193 - */ 194 - static struct task_struct *cpu_idle_thread[NR_CPUS]; 195 - 196 - struct create_idle { 197 - struct work_struct work; 198 - struct task_struct *idle; 199 - struct completion done; 200 - int cpu; 201 - }; 202 - 203 - static void __cpuinit do_fork_idle(struct work_struct *work) 189 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 204 190 { 205 - struct create_idle *c_idle = 206 - container_of(work, struct create_idle, work); 207 - 208 - c_idle->idle = fork_idle(c_idle->cpu); 209 - complete(&c_idle->done); 210 - } 211 - 212 - int __cpuinit __cpu_up(unsigned int cpu) 213 - { 214 - struct task_struct *idle; 215 - 216 - /* 217 - * Processor goes to start_secondary(), sets online flag 218 - * The following code is purely to make sure 219 - * Linux can schedule processes on this slave. 220 - */ 221 - if (!cpu_idle_thread[cpu]) { 222 - /* 223 - * Schedule work item to avoid forking user task 224 - * Ported from arch/x86/kernel/smpboot.c 225 - */ 226 - struct create_idle c_idle = { 227 - .cpu = cpu, 228 - .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 229 - }; 230 - 231 - INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); 232 - schedule_work(&c_idle.work); 233 - wait_for_completion(&c_idle.done); 234 - idle = cpu_idle_thread[cpu] = c_idle.idle; 235 - 236 - if (IS_ERR(idle)) 237 - panic(KERN_ERR "Fork failed for CPU %d", cpu); 238 - } else { 239 - idle = cpu_idle_thread[cpu]; 240 - init_idle(idle, cpu); 241 - } 242 - 243 - mp_ops->boot_secondary(cpu, idle); 191 + mp_ops->boot_secondary(cpu, tidle); 244 192 245 193 /* 246 194 * Trust is futile. We should really have timeouts ...
+1 -1
arch/mn10300/Makefile
··· 51 51 endif 52 52 53 53 54 - head-y := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o 54 + head-y := arch/mn10300/kernel/head.o 55 55 56 56 core-y += arch/mn10300/kernel/ arch/mn10300/mm/ 57 57
+3 -14
arch/mn10300/include/asm/thread_info.h
··· 20 20 21 21 #ifdef CONFIG_4KSTACKS 22 22 #define THREAD_SIZE (4096) 23 + #define THREAD_SIZE_ORDER (0) 23 24 #else 24 25 #define THREAD_SIZE (8192) 26 + #define THREAD_SIZE_ORDER (1) 25 27 #endif 26 28 27 29 #define STACK_WARN (THREAD_SIZE / 8) ··· 122 120 return sp; 123 121 } 124 122 125 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 126 - 127 - /* thread information allocation */ 128 - #ifdef CONFIG_DEBUG_STACK_USAGE 129 - #define alloc_thread_info_node(tsk, node) \ 130 - kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 131 - #else 132 - #define alloc_thread_info_node(tsk, node) \ 133 - kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 134 - #endif 135 - 136 123 #ifndef CONFIG_KGDB 137 - #define free_thread_info(ti) kfree((ti)) 138 - #else 139 - extern void free_thread_info(struct thread_info *); 124 + void arch_release_thread_info(struct thread_info *ti) 140 125 #endif 141 126 #define get_thread_info(ti) get_task_struct((ti)->task) 142 127 #define put_thread_info(ti) put_task_struct((ti)->task)
+1 -1
arch/mn10300/kernel/Makefile
··· 1 1 # 2 2 # Makefile for the MN10300-specific core kernel code 3 3 # 4 - extra-y := head.o init_task.o vmlinux.lds 4 + extra-y := head.o vmlinux.lds 5 5 6 6 fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o 7 7 fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o
-39
arch/mn10300/kernel/init_task.c
··· 1 - /* MN10300 Initial task definitions 2 - * 3 - * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd. 4 - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public Licence 8 - * as published by the Free Software Foundation; either version 9 - * 2 of the Licence, or (at your option) any later version. 10 - */ 11 - #include <linux/mm.h> 12 - #include <linux/module.h> 13 - #include <linux/sched.h> 14 - #include <linux/init.h> 15 - #include <linux/init_task.h> 16 - #include <linux/fs.h> 17 - #include <linux/mqueue.h> 18 - #include <asm/uaccess.h> 19 - #include <asm/pgtable.h> 20 - 21 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 22 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 23 - /* 24 - * Initial thread structure. 25 - * 26 - * We need to make sure that this is THREAD_SIZE aligned due to the 27 - * way process stacks are handled. This is done by having a special 28 - * "init_task" linker map entry.. 29 - */ 30 - union thread_union init_thread_union __init_task_data = 31 - { INIT_THREAD_INFO(init_task) }; 32 - 33 - /* 34 - * Initial task structure. 35 - * 36 - * All other task structs will be allocated on slabs in fork.c 37 - */ 38 - struct task_struct init_task = INIT_TASK(init_task); 39 - EXPORT_SYMBOL(init_task);
+1 -2
arch/mn10300/kernel/kgdb.c
··· 397 397 * single-step state is cleared. At this point the breakpoints should have 398 398 * been removed by __switch_to(). 399 399 */ 400 - void free_thread_info(struct thread_info *ti) 400 + void arch_release_thread_info(struct thread_info *ti) 401 401 { 402 402 if (kgdb_sstep_thread == ti) { 403 403 kgdb_sstep_thread = NULL; ··· 407 407 * so force immediate reentry */ 408 408 kgdb_breakpoint(); 409 409 } 410 - kfree(ti); 411 410 } 412 411 413 412 /*
+1 -1
arch/mn10300/kernel/smp.c
··· 924 924 * __cpu_up - Set smp_commenced_mask for the nominated CPU 925 925 * @cpu: The target CPU. 926 926 */ 927 - int __devinit __cpu_up(unsigned int cpu) 927 + int __devinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 928 928 { 929 929 int timeout; 930 930
+1 -1
arch/openrisc/Makefile
··· 38 38 KBUILD_CFLAGS += $(call cc-option,-msoft-div) 39 39 endif 40 40 41 - head-y := arch/openrisc/kernel/head.o arch/openrisc/kernel/init_task.o 41 + head-y := arch/openrisc/kernel/head.o 42 42 43 43 core-y += arch/openrisc/lib/ \ 44 44 arch/openrisc/kernel/ \
+1 -1
arch/openrisc/kernel/Makefile
··· 2 2 # Makefile for the linux kernel. 3 3 # 4 4 5 - extra-y := head.o vmlinux.lds init_task.o 5 + extra-y := head.o vmlinux.lds 6 6 7 7 obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \ 8 8 traps.o time.o irq.o entry.o ptrace.o signal.o sys_or32.o \
-42
arch/openrisc/kernel/init_task.c
··· 1 - /* 2 - * OpenRISC init_task.c 3 - * 4 - * Linux architectural port borrowing liberally from similar works of 5 - * others. All original copyrights apply as per the original source 6 - * declaration. 7 - * 8 - * Modifications for the OpenRISC architecture: 9 - * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> 10 - * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> 11 - * 12 - * This program is free software; you can redistribute it and/or 13 - * modify it under the terms of the GNU General Public License 14 - * as published by the Free Software Foundation; either version 15 - * 2 of the License, or (at your option) any later version. 16 - */ 17 - 18 - #include <linux/init_task.h> 19 - #include <linux/mqueue.h> 20 - #include <linux/export.h> 21 - 22 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 23 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 24 - 25 - /* 26 - * Initial thread structure. 27 - * 28 - * We need to make sure that this is THREAD_SIZE aligned due to the 29 - * way process stacks are handled. This is done by having a special 30 - * "init_task" linker map entry.. 31 - */ 32 - union thread_union init_thread_union __init_task_data = { 33 - INIT_THREAD_INFO(init_task) 34 - }; 35 - 36 - /* 37 - * Initial task structure. 38 - * 39 - * All other task structs will be allocated on slabs in fork.c 40 - */ 41 - struct task_struct init_task = INIT_TASK(init_task); 42 - EXPORT_SYMBOL(init_task);
+1
arch/parisc/Kconfig
··· 17 17 select GENERIC_PCI_IOMAP 18 18 select IRQ_PER_CPU 19 19 select ARCH_HAVE_NMI_SAFE_CMPXCHG 20 + select GENERIC_SMP_IDLE_THREAD 20 21 21 22 help 22 23 The PA-RISC microprocessor is designed by Hewlett-Packard and used
+1 -1
arch/parisc/Makefile
··· 75 75 76 76 KBUILD_CFLAGS += $(cflags-y) 77 77 78 - kernel-y := mm/ kernel/ math-emu/ kernel/init_task.o 78 + kernel-y := mm/ kernel/ math-emu/ 79 79 kernel-$(CONFIG_HPUX) += hpux/ 80 80 81 81 core-y += $(addprefix arch/parisc/, $(kernel-y))
+1 -1
arch/parisc/kernel/Makefile
··· 2 2 # Makefile for arch/parisc/kernel 3 3 # 4 4 5 - extra-y := init_task.o head.o vmlinux.lds 5 + extra-y := head.o vmlinux.lds 6 6 7 7 obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \ 8 8 pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
-70
arch/parisc/kernel/init_task.c
··· 1 - /* 2 - * Static declaration of "init" task data structure. 3 - * 4 - * Copyright (C) 2000 Paul Bame <bame at parisc-linux.org> 5 - * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org> 6 - * Copyright (C) 2001 Helge Deller <deller @ parisc-linux.org> 7 - * Copyright (C) 2002 Matthew Wilcox <willy with parisc-linux.org> 8 - * 9 - * 10 - * This program is free software; you can redistribute it and/or modify 11 - * it under the terms of the GNU General Public License as published by 12 - * the Free Software Foundation; either version 2 of the License, or 13 - * (at your option) any later version. 14 - * 15 - * This program is distributed in the hope that it will be useful, 16 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 - * GNU General Public License for more details. 19 - * 20 - * You should have received a copy of the GNU General Public License 21 - * along with this program; if not, write to the Free Software 22 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 - */ 24 - 25 - #include <linux/mm.h> 26 - #include <linux/fs.h> 27 - #include <linux/module.h> 28 - #include <linux/sched.h> 29 - #include <linux/init.h> 30 - #include <linux/init_task.h> 31 - #include <linux/mqueue.h> 32 - 33 - #include <asm/uaccess.h> 34 - #include <asm/pgtable.h> 35 - #include <asm/pgalloc.h> 36 - 37 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 38 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 39 - /* 40 - * Initial task structure. 41 - * 42 - * We need to make sure that this is 16384-byte aligned due to the 43 - * way process stacks are handled. This is done by having a special 44 - * "init_task" linker map entry.. 
45 - */ 46 - union thread_union init_thread_union __init_task_data 47 - __attribute__((aligned(128))) = 48 - { INIT_THREAD_INFO(init_task) }; 49 - 50 - #if PT_NLEVELS == 3 51 - /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout 52 - * with the first pmd adjacent to the pgd and below it. gcc doesn't actually 53 - * guarantee that global objects will be laid out in memory in the same order 54 - * as the order of declaration, so put these in different sections and use 55 - * the linker script to order them. */ 56 - pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE))); 57 - #endif 58 - 59 - pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE))); 60 - pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE))); 61 - 62 - /* 63 - * Initial task structure. 64 - * 65 - * All other task structs will be allocated on slabs in fork.c 66 - */ 67 - EXPORT_SYMBOL(init_task); 68 - 69 - __asm__(".data"); 70 - struct task_struct init_task = INIT_TASK(init_task);
+3 -22
arch/parisc/kernel/smp.c
··· 340 340 /* 341 341 * Bring one cpu online. 342 342 */ 343 - int __cpuinit smp_boot_one_cpu(int cpuid) 343 + int __cpuinit smp_boot_one_cpu(int cpuid, struct task_struct *idle) 344 344 { 345 345 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); 346 - struct task_struct *idle; 347 346 long timeout; 348 - 349 - /* 350 - * Create an idle task for this CPU. Note the address wed* give 351 - * to kernel_thread is irrelevant -- it's going to start 352 - * where OS_BOOT_RENDEVZ vector in SAL says to start. But 353 - * this gets all the other task-y sort of data structures set 354 - * up like we wish. We need to pull the just created idle task 355 - * off the run queue and stuff it into the init_tasks[] array. 356 - * Sheesh . . . 357 - */ 358 - 359 - idle = fork_idle(cpuid); 360 - if (IS_ERR(idle)) 361 - panic("SMP: fork failed for CPU:%d", cpuid); 362 347 363 348 task_thread_info(idle)->cpu = cpuid; 364 349 ··· 388 403 udelay(100); 389 404 barrier(); 390 405 } 391 - 392 - put_task_struct(idle); 393 - idle = NULL; 394 - 395 406 printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid); 396 407 return -1; 397 408 ··· 436 455 } 437 456 438 457 439 - int __cpuinit __cpu_up(unsigned int cpu) 458 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 440 459 { 441 460 if (cpu != 0 && cpu < parisc_max_cpus) 442 - smp_boot_one_cpu(cpu); 461 + smp_boot_one_cpu(cpu, tidle); 443 462 444 463 return cpu_online(cpu) ? 0 : -ENOSYS; 445 464 }
+12
arch/parisc/mm/init.c
··· 33 33 34 34 extern int data_start; 35 35 36 + #if PT_NLEVELS == 3 37 + /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout 38 + * with the first pmd adjacent to the pgd and below it. gcc doesn't actually 39 + * guarantee that global objects will be laid out in memory in the same order 40 + * as the order of declaration, so put these in different sections and use 41 + * the linker script to order them. */ 42 + pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE))); 43 + #endif 44 + 45 + pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE))); 46 + pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE))); 47 + 36 48 #ifdef CONFIG_DISCONTIGMEM 37 49 struct node_map_data node_data[MAX_NUMNODES] __read_mostly; 38 50 unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
+1 -4
arch/powerpc/Kconfig
··· 87 87 bool 88 88 default y if 64BIT 89 89 90 - config ARCH_HAS_CPU_IDLE_WAIT 91 - bool 92 - default y 93 - 94 90 config GENERIC_HWEIGHT 95 91 bool 96 92 default y ··· 140 144 select HAVE_BPF_JIT if PPC64 141 145 select HAVE_ARCH_JUMP_LABEL 142 146 select ARCH_HAVE_NMI_SAFE_CMPXCHG 147 + select GENERIC_SMP_IDLE_THREAD 143 148 144 149 config EARLY_PRINTK 145 150 bool
-1
arch/powerpc/include/asm/processor.h
··· 386 386 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; 387 387 388 388 extern int powersave_nap; /* set if nap mode can be used in idle loop */ 389 - void cpu_idle_wait(void); 390 389 391 390 #ifdef CONFIG_PSERIES_IDLE 392 391 extern void update_smt_snooze_delay(int snooze);
-13
arch/powerpc/include/asm/thread_info.h
··· 62 62 #define init_thread_info (init_thread_union.thread_info) 63 63 #define init_stack (init_thread_union.stack) 64 64 65 - /* thread information allocation */ 66 - 67 - #if THREAD_SHIFT >= PAGE_SHIFT 68 - 69 65 #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) 70 - 71 - #else /* THREAD_SHIFT < PAGE_SHIFT */ 72 - 73 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 74 - 75 - extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node); 76 - extern void free_thread_info(struct thread_info *ti); 77 - 78 - #endif /* THREAD_SHIFT < PAGE_SHIFT */ 79 66 80 67 /* how to get the thread information struct from C */ 81 68 static inline struct thread_info *current_thread_info(void)
+1 -1
arch/powerpc/kernel/Makefile
··· 28 28 29 29 obj-y := cputable.o ptrace.o syscalls.o \ 30 30 irq.o align.o signal_32.o pmc.o vdso.o \ 31 - init_task.o process.o systbl.o idle.o \ 31 + process.o systbl.o idle.o \ 32 32 signal.o sysfs.o cacheinfo.o time.o \ 33 33 prom.o traps.o setup-common.o \ 34 34 udbg.o misc.o io.o dma.o \
-23
arch/powerpc/kernel/idle.c
··· 113 113 } 114 114 } 115 115 116 - 117 - /* 118 - * cpu_idle_wait - Used to ensure that all the CPUs come out of the old 119 - * idle loop and start using the new idle loop. 120 - * Required while changing idle handler on SMP systems. 121 - * Caller must have changed idle handler to the new value before the call. 122 - * This window may be larger on shared systems. 123 - */ 124 - void cpu_idle_wait(void) 125 - { 126 - int cpu; 127 - smp_mb(); 128 - 129 - /* kick all the CPUs so that they exit out of old idle routine */ 130 - get_online_cpus(); 131 - for_each_online_cpu(cpu) { 132 - if (cpu != smp_processor_id()) 133 - smp_send_reschedule(cpu); 134 - } 135 - put_online_cpus(); 136 - } 137 - EXPORT_SYMBOL_GPL(cpu_idle_wait); 138 - 139 116 int powersave_nap; 140 117 141 118 #ifdef CONFIG_SYSCTL
-29
arch/powerpc/kernel/init_task.c
··· 1 - #include <linux/mm.h> 2 - #include <linux/export.h> 3 - #include <linux/sched.h> 4 - #include <linux/init.h> 5 - #include <linux/init_task.h> 6 - #include <linux/fs.h> 7 - #include <linux/mqueue.h> 8 - #include <asm/uaccess.h> 9 - 10 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 11 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 12 - /* 13 - * Initial thread structure. 14 - * 15 - * We need to make sure that this is 16384-byte aligned due to the 16 - * way process stacks are handled. This is done by having a special 17 - * "init_task" linker map entry.. 18 - */ 19 - union thread_union init_thread_union __init_task_data = 20 - { INIT_THREAD_INFO(init_task) }; 21 - 22 - /* 23 - * Initial task structure. 24 - * 25 - * All other task structs will be allocated on slabs in fork.c 26 - */ 27 - struct task_struct init_task = INIT_TASK(init_task); 28 - 29 - EXPORT_SYMBOL(init_task);
-31
arch/powerpc/kernel/process.c
··· 1252 1252 } 1253 1253 #endif /* CONFIG_PPC64 */ 1254 1254 1255 - #if THREAD_SHIFT < PAGE_SHIFT 1256 - 1257 - static struct kmem_cache *thread_info_cache; 1258 - 1259 - struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) 1260 - { 1261 - struct thread_info *ti; 1262 - 1263 - ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node); 1264 - if (unlikely(ti == NULL)) 1265 - return NULL; 1266 - #ifdef CONFIG_DEBUG_STACK_USAGE 1267 - memset(ti, 0, THREAD_SIZE); 1268 - #endif 1269 - return ti; 1270 - } 1271 - 1272 - void free_thread_info(struct thread_info *ti) 1273 - { 1274 - kmem_cache_free(thread_info_cache, ti); 1275 - } 1276 - 1277 - void thread_info_cache_init(void) 1278 - { 1279 - thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, 1280 - THREAD_SIZE, 0, NULL); 1281 - BUG_ON(thread_info_cache == NULL); 1282 - } 1283 - 1284 - #endif /* THREAD_SHIFT < PAGE_SHIFT */ 1285 - 1286 1255 unsigned long arch_align_stack(unsigned long sp) 1287 1256 { 1288 1257 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+6 -70
arch/powerpc/kernel/smp.c
··· 57 57 #define DBG(fmt...) 58 58 #endif 59 59 60 - 61 - /* Store all idle threads, this can be reused instead of creating 62 - * a new thread. Also avoids complicated thread destroy functionality 63 - * for idle threads. 64 - */ 65 60 #ifdef CONFIG_HOTPLUG_CPU 66 - /* 67 - * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is 68 - * removed after init for !CONFIG_HOTPLUG_CPU. 69 - */ 70 - static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); 71 - #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) 72 - #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) 73 - 74 61 /* State of each CPU during hotplug phases */ 75 62 static DEFINE_PER_CPU(int, cpu_state) = { 0 }; 76 - 77 - #else 78 - static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; 79 - #define get_idle_for_cpu(x) (idle_thread_array[(x)]) 80 - #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) 81 63 #endif 82 64 83 65 struct thread_info *secondary_ti; ··· 411 429 } 412 430 #endif 413 431 414 - struct create_idle { 415 - struct work_struct work; 416 - struct task_struct *idle; 417 - struct completion done; 418 - int cpu; 419 - }; 420 - 421 - static void __cpuinit do_fork_idle(struct work_struct *work) 432 + static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) 422 433 { 423 - struct create_idle *c_idle = 424 - container_of(work, struct create_idle, work); 425 - 426 - c_idle->idle = fork_idle(c_idle->cpu); 427 - complete(&c_idle->done); 428 - } 429 - 430 - static int __cpuinit create_idle(unsigned int cpu) 431 - { 432 - struct thread_info *ti; 433 - struct create_idle c_idle = { 434 - .cpu = cpu, 435 - .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 436 - }; 437 - INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); 438 - 439 - c_idle.idle = get_idle_for_cpu(cpu); 440 - 441 - /* We can't use kernel_thread since we must avoid to 442 - * reschedule the child. 
We use a workqueue because 443 - * we want to fork from a kernel thread, not whatever 444 - * userspace process happens to be trying to online us. 445 - */ 446 - if (!c_idle.idle) { 447 - schedule_work(&c_idle.work); 448 - wait_for_completion(&c_idle.done); 449 - } else 450 - init_idle(c_idle.idle, cpu); 451 - if (IS_ERR(c_idle.idle)) { 452 - pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); 453 - return PTR_ERR(c_idle.idle); 454 - } 455 - ti = task_thread_info(c_idle.idle); 434 + struct thread_info *ti = task_thread_info(idle); 456 435 457 436 #ifdef CONFIG_PPC64 458 - paca[cpu].__current = c_idle.idle; 437 + paca[cpu].__current = idle; 459 438 paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; 460 439 #endif 461 440 ti->cpu = cpu; 462 - current_set[cpu] = ti; 463 - 464 - return 0; 441 + secondary_ti = current_set[cpu] = ti; 465 442 } 466 443 467 - int __cpuinit __cpu_up(unsigned int cpu) 444 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 468 445 { 469 446 int rc, c; 470 447 ··· 431 490 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) 432 491 return -EINVAL; 433 492 434 - /* Make sure we have an idle thread */ 435 - rc = create_idle(cpu); 436 - if (rc) 437 - return rc; 438 - 439 - secondary_ti = current_set[cpu]; 493 + cpu_idle_thread_init(cpu, tidle); 440 494 441 495 /* Make sure callin-map entry is 0 (can be leftover a CPU 442 496 * hotplug
+1
arch/s390/Kconfig
··· 122 122 select ARCH_INLINE_WRITE_UNLOCK_BH 123 123 select ARCH_INLINE_WRITE_UNLOCK_IRQ 124 124 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 125 + select GENERIC_SMP_IDLE_THREAD 125 126 126 127 config SCHED_OMIT_FRAME_POINTER 127 128 def_bool y
-1
arch/s390/Makefile
··· 91 91 92 92 head-y := arch/s390/kernel/head.o 93 93 head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) 94 - head-y += arch/s390/kernel/init_task.o 95 94 96 95 # See arch/s390/Kbuild for content of core part of the kernel 97 96 core-y += arch/s390/
+1 -1
arch/s390/include/asm/smp.h
··· 16 16 extern struct mutex smp_cpu_state_mutex; 17 17 extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; 18 18 19 - extern int __cpu_up(unsigned int cpu); 19 + extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); 20 20 21 21 extern void arch_send_call_function_single_ipi(int cpu); 22 22 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+1 -1
arch/s390/kernel/Makefile
··· 28 28 obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 29 29 obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 30 30 31 - extra-y += head.o init_task.o vmlinux.lds 31 + extra-y += head.o vmlinux.lds 32 32 extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) 33 33 34 34 obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
-38
arch/s390/kernel/init_task.c
··· 1 - /* 2 - * arch/s390/kernel/init_task.c 3 - * 4 - * S390 version 5 - * 6 - * Derived from "arch/i386/kernel/init_task.c" 7 - */ 8 - 9 - #include <linux/mm.h> 10 - #include <linux/fs.h> 11 - #include <linux/module.h> 12 - #include <linux/sched.h> 13 - #include <linux/init_task.h> 14 - #include <linux/mqueue.h> 15 - 16 - #include <asm/uaccess.h> 17 - #include <asm/pgtable.h> 18 - 19 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 20 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 21 - /* 22 - * Initial thread structure. 23 - * 24 - * We need to make sure that this is THREAD_SIZE aligned due to the 25 - * way process stacks are handled. This is done by having a special 26 - * "init_task" linker map entry.. 27 - */ 28 - union thread_union init_thread_union __init_task_data = 29 - { INIT_THREAD_INFO(init_task) }; 30 - 31 - /* 32 - * Initial task structure. 33 - * 34 - * All other task structs will be allocated on slabs in fork.c 35 - */ 36 - struct task_struct init_task = INIT_TASK(init_task); 37 - 38 - EXPORT_SYMBOL(init_task);
+3 -32
arch/s390/kernel/smp.c
··· 85 85 86 86 struct pcpu { 87 87 struct cpu cpu; 88 - struct task_struct *idle; /* idle process for the cpu */ 89 88 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ 90 89 unsigned long async_stack; /* async stack for the cpu */ 91 90 unsigned long panic_stack; /* panic stack for the cpu */ ··· 724 725 cpu_idle(); 725 726 } 726 727 727 - struct create_idle { 728 - struct work_struct work; 729 - struct task_struct *idle; 730 - struct completion done; 731 - int cpu; 732 - }; 733 - 734 - static void __cpuinit smp_fork_idle(struct work_struct *work) 735 - { 736 - struct create_idle *c_idle; 737 - 738 - c_idle = container_of(work, struct create_idle, work); 739 - c_idle->idle = fork_idle(c_idle->cpu); 740 - complete(&c_idle->done); 741 - } 742 - 743 728 /* Upping and downing of CPUs */ 744 - int __cpuinit __cpu_up(unsigned int cpu) 729 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 745 730 { 746 - struct create_idle c_idle; 747 731 struct pcpu *pcpu; 748 732 int rc; 749 733 ··· 736 754 if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) != 737 755 sigp_order_code_accepted) 738 756 return -EIO; 739 - if (!pcpu->idle) { 740 - c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); 741 - INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); 742 - c_idle.cpu = cpu; 743 - schedule_work(&c_idle.work); 744 - wait_for_completion(&c_idle.done); 745 - if (IS_ERR(c_idle.idle)) 746 - return PTR_ERR(c_idle.idle); 747 - pcpu->idle = c_idle.idle; 748 - } 749 - init_idle(pcpu->idle, cpu); 757 + 750 758 rc = pcpu_alloc_lowcore(pcpu, cpu); 751 759 if (rc) 752 760 return rc; 753 761 pcpu_prepare_secondary(pcpu, cpu); 754 - pcpu_attach_task(pcpu, pcpu->idle); 762 + pcpu_attach_task(pcpu, tidle); 755 763 pcpu_start_fn(pcpu, smp_start_secondary, NULL); 756 764 while (!cpu_online(cpu)) 757 765 cpu_relax(); ··· 828 856 struct pcpu *pcpu = pcpu_devices; 829 857 830 858 boot_cpu_address = stap(); 831 - pcpu->idle = current; 832 859 pcpu->state = 
CPU_STATE_CONFIGURED; 833 860 pcpu->address = boot_cpu_address; 834 861 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
+3 -7
arch/score/include/asm/thread_info.h
··· 11 11 #include <linux/const.h> 12 12 13 13 /* thread information allocation */ 14 - #define THREAD_SIZE_ORDER (1) 15 - #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 16 - #define THREAD_MASK (THREAD_SIZE - _AC(1,UL)) 17 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 14 + #define THREAD_SIZE_ORDER (1) 15 + #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 16 + #define THREAD_MASK (THREAD_SIZE - _AC(1,UL)) 18 17 19 18 #ifndef __ASSEMBLY__ 20 19 ··· 69 70 /* How to get the thread information struct from C. */ 70 71 register struct thread_info *__current_thread_info __asm__("r28"); 71 72 #define current_thread_info() __current_thread_info 72 - 73 - #define alloc_thread_info_node(tsk, node) kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 74 - #define free_thread_info(info) kfree(info) 75 73 76 74 #endif /* !__ASSEMBLY__ */ 77 75
+1 -1
arch/score/kernel/Makefile
··· 4 4 5 5 extra-y := head.o vmlinux.lds 6 6 7 - obj-y += entry.o init_task.o irq.o process.o ptrace.o \ 7 + obj-y += entry.o irq.o process.o ptrace.o \ 8 8 setup.o signal.o sys_score.o time.o traps.o \ 9 9 sys_call_table.o 10 10
-46
arch/score/kernel/init_task.c
··· 1 - /* 2 - * arch/score/kernel/init_task.c 3 - * 4 - * Score Processor version. 5 - * 6 - * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 - * 8 - * This program is free software; you can redistribute it and/or modify 9 - * it under the terms of the GNU General Public License as published by 10 - * the Free Software Foundation; either version 2 of the License, or 11 - * (at your option) any later version. 12 - * 13 - * This program is distributed in the hope that it will be useful, 14 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 - * GNU General Public License for more details. 17 - * 18 - * You should have received a copy of the GNU General Public License 19 - * along with this program; if not, see the file COPYING, or write 20 - * to the Free Software Foundation, Inc., 21 - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 22 - */ 23 - 24 - #include <linux/init_task.h> 25 - #include <linux/mqueue.h> 26 - 27 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 28 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 29 - 30 - /* 31 - * Initial thread structure. 32 - * 33 - * We need to make sure that this is THREAD_SIZE aligned due to the 34 - * way process stacks are handled. This is done by having a special 35 - * "init_task" linker map entry.. 36 - */ 37 - union thread_union init_thread_union __init_task_data = 38 - { INIT_THREAD_INFO(init_task) }; 39 - 40 - /* 41 - * Initial task structure. 42 - * 43 - * All other task structs will be allocated on slabs in fork.c 44 - */ 45 - struct task_struct init_task = INIT_TASK(init_task); 46 - EXPORT_SYMBOL(init_task);
+1 -3
arch/sh/Kconfig
··· 28 28 select RTC_LIB 29 29 select GENERIC_ATOMIC64 30 30 select GENERIC_IRQ_SHOW 31 + select GENERIC_SMP_IDLE_THREAD 31 32 help 32 33 The SuperH is a RISC processor targeted for use in embedded systems 33 34 and consumer electronics; it was also used in the Sega Dreamcast ··· 151 150 def_bool y 152 151 153 152 config ARCH_HAS_DEFAULT_IDLE 154 - def_bool y 155 - 156 - config ARCH_HAS_CPU_IDLE_WAIT 157 153 def_bool y 158 154 159 155 config NO_IOPORT
+1 -1
arch/sh/Makefile
··· 124 124 125 125 export ld-bfd BITS 126 126 127 - head-y := arch/sh/kernel/init_task.o arch/sh/kernel/head_$(BITS).o 127 + head-y := arch/sh/kernel/head_$(BITS).o 128 128 129 129 core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/ 130 130 core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/
-5
arch/sh/include/asm/processor.h
··· 85 85 struct tlb_info itlb; 86 86 struct tlb_info dtlb; 87 87 88 - #ifdef CONFIG_SMP 89 - struct task_struct *idle; 90 - #endif 91 - 92 88 unsigned int phys_bits; 93 89 unsigned long flags; 94 90 } __attribute__ ((aligned(L1_CACHE_BYTES))); ··· 98 102 #define cpu_relax() barrier() 99 103 100 104 void default_idle(void); 101 - void cpu_idle_wait(void); 102 105 void stop_this_cpu(void *); 103 106 104 107 /* Forward decl */
+1 -10
arch/sh/include/asm/thread_info.h
··· 88 88 return ti; 89 89 } 90 90 91 - /* thread information allocation */ 92 - #if THREAD_SHIFT >= PAGE_SHIFT 93 - 94 91 #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) 95 92 96 - #endif 97 - 98 - extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node); 99 - extern void free_thread_info(struct thread_info *ti); 100 93 extern void arch_task_cache_init(void); 101 - #define arch_task_cache_init arch_task_cache_init 102 94 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 95 + extern void arch_release_task_struct(struct task_struct *tsk); 103 96 extern void init_thread_xstate(void); 104 - 105 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 106 97 107 98 #endif /* __ASSEMBLY__ */ 108 99
+1 -1
arch/sh/kernel/Makefile
··· 2 2 # Makefile for the Linux/SuperH kernel. 3 3 # 4 4 5 - extra-y := head_$(BITS).o init_task.o vmlinux.lds 5 + extra-y := head_$(BITS).o vmlinux.lds 6 6 7 7 ifdef CONFIG_FUNCTION_TRACER 8 8 # Do not profile debug and lowlevel utilities
-20
arch/sh/kernel/idle.c
··· 132 132 pm_idle = poll_idle; 133 133 } 134 134 135 - static void do_nothing(void *unused) 136 - { 137 - } 138 - 139 135 void stop_this_cpu(void *unused) 140 136 { 141 137 local_irq_disable(); ··· 140 144 for (;;) 141 145 cpu_sleep(); 142 146 } 143 - 144 - /* 145 - * cpu_idle_wait - Used to ensure that all the CPUs discard old value of 146 - * pm_idle and update to new pm_idle value. Required while changing pm_idle 147 - * handler on SMP systems. 148 - * 149 - * Caller must have changed pm_idle to the new value before the call. Old 150 - * pm_idle value will not be used by any CPU after the return of this function. 151 - */ 152 - void cpu_idle_wait(void) 153 - { 154 - smp_mb(); 155 - /* kick all the CPUs so that they exit out of pm_idle */ 156 - smp_call_function(do_nothing, NULL, 1); 157 - } 158 - EXPORT_SYMBOL_GPL(cpu_idle_wait);
-30
arch/sh/kernel/init_task.c
··· 1 - #include <linux/mm.h> 2 - #include <linux/module.h> 3 - #include <linux/sched.h> 4 - #include <linux/init_task.h> 5 - #include <linux/mqueue.h> 6 - #include <linux/fs.h> 7 - #include <asm/uaccess.h> 8 - #include <asm/pgtable.h> 9 - 10 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 11 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 12 - struct pt_regs fake_swapper_regs; 13 - /* 14 - * Initial thread structure. 15 - * 16 - * We need to make sure that this is 8192-byte aligned due to the 17 - * way process stacks are handled. This is done by having a special 18 - * "init_task" linker map entry.. 19 - */ 20 - union thread_union init_thread_union __init_task_data = 21 - { INIT_THREAD_INFO(init_task) }; 22 - 23 - /* 24 - * Initial task structure. 25 - * 26 - * All other task structs will be allocated on slabs in fork.c 27 - */ 28 - struct task_struct init_task = INIT_TASK(init_task); 29 - 30 - EXPORT_SYMBOL(init_task);
+2 -44
arch/sh/kernel/process.c
··· 29 29 } 30 30 } 31 31 32 - #if THREAD_SHIFT < PAGE_SHIFT 33 - static struct kmem_cache *thread_info_cache; 34 - 35 - struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) 32 + void arch_release_task_struct(struct task_struct *tsk) 36 33 { 37 - struct thread_info *ti; 38 - #ifdef CONFIG_DEBUG_STACK_USAGE 39 - gfp_t mask = GFP_KERNEL | __GFP_ZERO; 40 - #else 41 - gfp_t mask = GFP_KERNEL; 42 - #endif 43 - 44 - ti = kmem_cache_alloc_node(thread_info_cache, mask, node); 45 - return ti; 34 + free_thread_xstate(tsk); 46 35 } 47 - 48 - void free_thread_info(struct thread_info *ti) 49 - { 50 - free_thread_xstate(ti->task); 51 - kmem_cache_free(thread_info_cache, ti); 52 - } 53 - 54 - void thread_info_cache_init(void) 55 - { 56 - thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, 57 - THREAD_SIZE, SLAB_PANIC, NULL); 58 - } 59 - #else 60 - struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) 61 - { 62 - #ifdef CONFIG_DEBUG_STACK_USAGE 63 - gfp_t mask = GFP_KERNEL | __GFP_ZERO; 64 - #else 65 - gfp_t mask = GFP_KERNEL; 66 - #endif 67 - struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER); 68 - 69 - return page ? page_address(page) : NULL; 70 - } 71 - 72 - void free_thread_info(struct thread_info *ti) 73 - { 74 - free_thread_xstate(ti->task); 75 - free_pages((unsigned long)ti, THREAD_SIZE_ORDER); 76 - } 77 - #endif /* THREAD_SHIFT < PAGE_SHIFT */ 78 36 79 37 void arch_task_cache_init(void) 80 38 {
+1 -13
arch/sh/kernel/smp.c
··· 220 220 void *thread_info; 221 221 } stack_start; 222 222 223 - int __cpuinit __cpu_up(unsigned int cpu) 223 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tsk) 224 224 { 225 - struct task_struct *tsk; 226 225 unsigned long timeout; 227 - 228 - tsk = cpu_data[cpu].idle; 229 - if (!tsk) { 230 - tsk = fork_idle(cpu); 231 - if (IS_ERR(tsk)) { 232 - pr_err("Failed forking idle task for cpu %d\n", cpu); 233 - return PTR_ERR(tsk); 234 - } 235 - 236 - cpu_data[cpu].idle = tsk; 237 - } 238 226 239 227 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; 240 228
+2
arch/sparc/Kconfig
··· 31 31 select GENERIC_PCI_IOMAP 32 32 select HAVE_NMI_WATCHDOG if SPARC64 33 33 select HAVE_BPF_JIT 34 + select GENERIC_SMP_IDLE_THREAD 34 35 35 36 config SPARC32 36 37 def_bool !64BIT 37 38 select GENERIC_ATOMIC64 38 39 select CLZ_TAB 40 + select ARCH_THREAD_INFO_ALLOCATOR 39 41 40 42 config SPARC64 41 43 def_bool 64BIT
-1
arch/sparc/Makefile
··· 50 50 endif 51 51 52 52 head-y := arch/sparc/kernel/head_$(BITS).o 53 - head-y += arch/sparc/kernel/init_task.o 54 53 55 54 # See arch/sparc/Kbuild for the core part of the kernel 56 55 core-y += arch/sparc/
+3 -2
arch/sparc/include/asm/leon.h
··· 270 270 #include <linux/interrupt.h> 271 271 272 272 struct device_node; 273 + struct task_struct; 273 274 extern unsigned int leon_build_device_irq(unsigned int real_irq, 274 275 irq_flow_handler_t flow_handler, 275 276 const char *name, int do_ack); ··· 290 289 extern void leon_clear_profile_irq(int cpu); 291 290 extern void leon_smp_done(void); 292 291 extern void leon_boot_cpus(void); 293 - extern int leon_boot_one_cpu(int i); 292 + extern int leon_boot_one_cpu(int i, struct task_struct *); 294 293 void leon_init_smp(void); 295 294 extern void cpu_idle(void); 296 295 extern void init_IRQ(void); ··· 326 325 #define init_leon() do {} while (0) 327 326 #define leon_smp_done() do {} while (0) 328 327 #define leon_boot_cpus() do {} while (0) 329 - #define leon_boot_one_cpu(i) 1 328 + #define leon_boot_one_cpu(i, t) 1 330 329 #define leon_init_smp() do {} while (0) 331 330 332 331 #endif /* !defined(CONFIG_SPARC_LEON) */
-2
arch/sparc/include/asm/thread_info_32.h
··· 79 79 */ 80 80 #define THREAD_INFO_ORDER 1 81 81 82 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 83 - 84 82 struct thread_info * alloc_thread_info_node(struct task_struct *tsk, int node); 85 83 void free_thread_info(struct thread_info *); 86 84
+2 -23
arch/sparc/include/asm/thread_info_64.h
··· 138 138 139 139 /* thread information allocation */ 140 140 #if PAGE_SHIFT == 13 141 - #define __THREAD_INFO_ORDER 1 141 + #define THREAD_SIZE_ORDER 1 142 142 #else /* PAGE_SHIFT == 13 */ 143 - #define __THREAD_INFO_ORDER 0 143 + #define THREAD_SIZE_ORDER 0 144 144 #endif /* PAGE_SHIFT == 13 */ 145 - 146 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 147 - 148 - #ifdef CONFIG_DEBUG_STACK_USAGE 149 - #define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO) 150 - #else 151 - #define THREAD_FLAGS (GFP_KERNEL) 152 - #endif 153 - 154 - #define alloc_thread_info_node(tsk, node) \ 155 - ({ \ 156 - struct page *page = alloc_pages_node(node, THREAD_FLAGS, \ 157 - __THREAD_INFO_ORDER); \ 158 - struct thread_info *ret; \ 159 - \ 160 - ret = page ? page_address(page) : NULL; \ 161 - ret; \ 162 - }) 163 - 164 - #define free_thread_info(ti) \ 165 - free_pages((unsigned long)(ti),__THREAD_INFO_ORDER) 166 145 167 146 #define __thread_flag_byte_ptr(ti) \ 168 147 ((unsigned char *)(&((ti)->flags)))
-1
arch/sparc/kernel/Makefile
··· 6 6 ccflags-y := -Werror 7 7 8 8 extra-y := head_$(BITS).o 9 - extra-y += init_task.o 10 9 11 10 # Undefine sparc when processing vmlinux.lds - it is used 12 11 # And teach CPP we are doing $(BITS) builds (for this case)
+10 -8
arch/sparc/kernel/init_task.c init/init_task.c
··· 1 - #include <linux/mm.h> 2 - #include <linux/fs.h> 3 - #include <linux/module.h> 4 - #include <linux/sched.h> 5 1 #include <linux/init_task.h> 2 + #include <linux/export.h> 6 3 #include <linux/mqueue.h> 4 + #include <linux/sched.h> 5 + #include <linux/init.h> 6 + #include <linux/fs.h> 7 + #include <linux/mm.h> 7 8 8 9 #include <asm/pgtable.h> 9 10 #include <asm/uaccess.h> 10 11 11 12 static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 12 13 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 14 + 15 + /* Initial task structure */ 13 16 struct task_struct init_task = INIT_TASK(init_task); 14 17 EXPORT_SYMBOL(init_task); 15 18 16 - /* .text section in head.S is aligned at 8k boundary and this gets linked 17 - * right after that so that the init_thread_union is aligned properly as well. 18 - * If this is not aligned on a 8k boundary, then you should change code 19 - * in etrap.S which assumes it. 19 + /* 20 + * Initial thread structure. Alignment of this is handled by a special 21 + * linker map entry. 20 22 */ 21 23 union thread_union init_thread_union __init_task_data = 22 24 { INIT_THREAD_INFO(init_task) };
+2 -7
arch/sparc/kernel/leon_smp.c
··· 203 203 204 204 } 205 205 206 - int __cpuinit leon_boot_one_cpu(int i) 206 + int __cpuinit leon_boot_one_cpu(int i, struct task_struct *idle) 207 207 { 208 - 209 - struct task_struct *p; 210 208 int timeout; 211 209 212 - /* Cook up an idler for this guy. */ 213 - p = fork_idle(i); 214 - 215 - current_set[i] = task_thread_info(p); 210 + current_set[i] = task_thread_info(idle); 216 211 217 212 /* See trampoline.S:leon_smp_cpu_startup for details... 218 213 * Initialize the contexts table
+6 -6
arch/sparc/kernel/smp_32.c
··· 256 256 set_cpu_possible(cpuid, true); 257 257 } 258 258 259 - int __cpuinit __cpu_up(unsigned int cpu) 259 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 260 260 { 261 - extern int __cpuinit smp4m_boot_one_cpu(int); 262 - extern int __cpuinit smp4d_boot_one_cpu(int); 261 + extern int __cpuinit smp4m_boot_one_cpu(int, struct task_struct *); 262 + extern int __cpuinit smp4d_boot_one_cpu(int, struct task_struct *); 263 263 int ret=0; 264 264 265 265 switch(sparc_cpu_model) { 266 266 case sun4m: 267 - ret = smp4m_boot_one_cpu(cpu); 267 + ret = smp4m_boot_one_cpu(cpu, tidle); 268 268 break; 269 269 case sun4d: 270 - ret = smp4d_boot_one_cpu(cpu); 270 + ret = smp4d_boot_one_cpu(cpu, tidle); 271 271 break; 272 272 case sparc_leon: 273 - ret = leon_boot_one_cpu(cpu); 273 + ret = leon_boot_one_cpu(cpu, tidle); 274 274 break; 275 275 case sun4e: 276 276 printk("SUN4E\n");
+4 -8
arch/sparc/kernel/smp_64.c
··· 343 343 */ 344 344 static struct thread_info *cpu_new_thread = NULL; 345 345 346 - static int __cpuinit smp_boot_one_cpu(unsigned int cpu) 346 + static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) 347 347 { 348 348 unsigned long entry = 349 349 (unsigned long)(&sparc64_cpu_startup); 350 350 unsigned long cookie = 351 351 (unsigned long)(&cpu_new_thread); 352 - struct task_struct *p; 353 352 void *descr = NULL; 354 353 int timeout, ret; 355 354 356 - p = fork_idle(cpu); 357 - if (IS_ERR(p)) 358 - return PTR_ERR(p); 359 355 callin_flag = 0; 360 - cpu_new_thread = task_thread_info(p); 356 + cpu_new_thread = task_thread_info(idle); 361 357 362 358 if (tlb_type == hypervisor) { 363 359 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) ··· 1223 1227 } 1224 1228 } 1225 1229 1226 - int __cpuinit __cpu_up(unsigned int cpu) 1230 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 1227 1231 { 1228 - int ret = smp_boot_one_cpu(cpu); 1232 + int ret = smp_boot_one_cpu(cpu, tidle); 1229 1233 1230 1234 if (!ret) { 1231 1235 cpumask_set_cpu(cpu, &smp_commenced_mask);
+2 -6
arch/sparc/kernel/sun4d_smp.c
··· 129 129 local_ops->cache_all(); 130 130 } 131 131 132 - int __cpuinit smp4d_boot_one_cpu(int i) 132 + int __cpuinit smp4d_boot_one_cpu(int i, struct task_struct *idle) 133 133 { 134 134 unsigned long *entry = &sun4d_cpu_startup; 135 - struct task_struct *p; 136 135 int timeout; 137 136 int cpu_node; 138 137 139 138 cpu_find_by_instance(i, &cpu_node, NULL); 140 - /* Cook up an idler for this guy. */ 141 - p = fork_idle(i); 142 - current_set[i] = task_thread_info(p); 143 - 139 + current_set[i] = task_thread_info(idle); 144 140 /* 145 141 * Initialize the contexts table 146 142 * Since the call to prom_startcpu() trashes the structure,
+2 -5
arch/sparc/kernel/sun4m_smp.c
··· 90 90 local_ops->cache_all(); 91 91 } 92 92 93 - int __cpuinit smp4m_boot_one_cpu(int i) 93 + int __cpuinit smp4m_boot_one_cpu(int i, struct task_struct *idle) 94 94 { 95 95 unsigned long *entry = &sun4m_cpu_startup; 96 - struct task_struct *p; 97 96 int timeout; 98 97 int cpu_node; 99 98 100 99 cpu_find_by_mid(i, &cpu_node); 100 + current_set[i] = task_thread_info(idle); 101 101 102 - /* Cook up an idler for this guy. */ 103 - p = fork_idle(i); 104 - current_set[i] = task_thread_info(p); 105 102 /* See trampoline.S for details... */ 106 103 entry += ((i - 1) * 3); 107 104
+2 -4
arch/tile/include/asm/thread_info.h
··· 77 77 78 78 #ifndef __ASSEMBLY__ 79 79 80 + void arch_release_thread_info(struct thread_info *info); 81 + 80 82 /* How to get the thread information struct from C. */ 81 83 register unsigned long stack_pointer __asm__("sp"); 82 84 83 85 #define current_thread_info() \ 84 86 ((struct thread_info *)(stack_pointer & -THREAD_SIZE)) 85 - 86 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 87 - extern struct thread_info *alloc_thread_info_node(struct task_struct *task, int node); 88 - extern void free_thread_info(struct thread_info *info); 89 87 90 88 /* Sit on a nap instruction until interrupted. */ 91 89 extern void smp_nap(void);
+1 -1
arch/tile/kernel/Makefile
··· 3 3 # 4 4 5 5 extra-y := vmlinux.lds head_$(BITS).o 6 - obj-y := backtrace.o entry.o init_task.o irq.o messaging.o \ 6 + obj-y := backtrace.o entry.o irq.o messaging.o \ 7 7 pci-dma.o proc.o process.o ptrace.o reboot.o \ 8 8 setup.o signal.o single_step.o stack.o sys.o sysfs.o time.o traps.o \ 9 9 intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
-59
arch/tile/kernel/init_task.c
··· 1 - /* 2 - * Copyright 2010 Tilera Corporation. All Rights Reserved. 3 - * 4 - * This program is free software; you can redistribute it and/or 5 - * modify it under the terms of the GNU General Public License 6 - * as published by the Free Software Foundation, version 2. 7 - * 8 - * This program is distributed in the hope that it will be useful, but 9 - * WITHOUT ANY WARRANTY; without even the implied warranty of 10 - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 - * NON INFRINGEMENT. See the GNU General Public License for 12 - * more details. 13 - */ 14 - 15 - #include <linux/mm.h> 16 - #include <linux/fs.h> 17 - #include <linux/init_task.h> 18 - #include <linux/mqueue.h> 19 - #include <linux/module.h> 20 - #include <linux/start_kernel.h> 21 - #include <linux/uaccess.h> 22 - 23 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 24 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 25 - 26 - /* 27 - * Initial thread structure. 28 - * 29 - * We need to make sure that this is THREAD_SIZE aligned due to the 30 - * way process stacks are handled. This is done by having a special 31 - * "init_task" linker map entry.. 32 - */ 33 - union thread_union init_thread_union __init_task_data = { 34 - INIT_THREAD_INFO(init_task) 35 - }; 36 - 37 - /* 38 - * Initial task structure. 39 - * 40 - * All other task structs will be allocated on slabs in fork.c 41 - */ 42 - struct task_struct init_task = INIT_TASK(init_task); 43 - EXPORT_SYMBOL(init_task); 44 - 45 - /* 46 - * per-CPU stack and boot info. 47 - */ 48 - DEFINE_PER_CPU(unsigned long, boot_sp) = 49 - (unsigned long)init_stack + THREAD_SIZE; 50 - 51 - #ifdef CONFIG_SMP 52 - DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel; 53 - #else 54 - /* 55 - * The variable must be __initdata since it references __init code. 56 - * With CONFIG_SMP it is per-cpu data, which is exempt from validation. 
57 - */ 58 - unsigned long __initdata boot_pc = (unsigned long)start_kernel; 59 - #endif
+2 -21
arch/tile/kernel/process.c
··· 114 114 } 115 115 } 116 116 117 - struct thread_info *alloc_thread_info_node(struct task_struct *task, int node) 118 - { 119 - struct page *page; 120 - gfp_t flags = GFP_KERNEL; 121 - 122 - #ifdef CONFIG_DEBUG_STACK_USAGE 123 - flags |= __GFP_ZERO; 124 - #endif 125 - 126 - page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER); 127 - if (!page) 128 - return NULL; 129 - 130 - return (struct thread_info *)page_address(page); 131 - } 132 - 133 117 /* 134 - * Free a thread_info node, and all of its derivative 135 - * data structures. 118 + * Release a thread_info structure 136 119 */ 137 - void free_thread_info(struct thread_info *info) 120 + void arch_release_thread_info(struct thread_info *info) 138 121 { 139 122 struct single_step_state *step_state = info->step_state; 140 123 ··· 152 169 */ 153 170 kfree(step_state); 154 171 } 155 - 156 - free_pages((unsigned long)info, THREAD_SIZE_ORDER); 157 172 } 158 173 159 174 static void save_arch_state(struct thread_struct *t);
+16
arch/tile/kernel/setup.c
··· 61 61 62 62 static unsigned long __initdata node_percpu[MAX_NUMNODES]; 63 63 64 + /* 65 + * per-CPU stack and boot info. 66 + */ 67 + DEFINE_PER_CPU(unsigned long, boot_sp) = 68 + (unsigned long)init_stack + THREAD_SIZE; 69 + 70 + #ifdef CONFIG_SMP 71 + DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel; 72 + #else 73 + /* 74 + * The variable must be __initdata since it references __init code. 75 + * With CONFIG_SMP it is per-cpu data, which is exempt from validation. 76 + */ 77 + unsigned long __initdata boot_pc = (unsigned long)start_kernel; 78 + #endif 79 + 64 80 #ifdef CONFIG_HIGHMEM 65 81 /* Page frame index of end of lowmem on each controller. */ 66 82 unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];
+1 -1
arch/tile/kernel/smpboot.c
··· 222 222 cpu_idle(); 223 223 } 224 224 225 - int __cpuinit __cpu_up(unsigned int cpu) 225 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 226 226 { 227 227 /* Wait 5s total for all CPUs for them to come online */ 228 228 static int timeout;
-2
arch/um/include/asm/processor-generic.h
··· 68 68 .request = { 0 } \ 69 69 } 70 70 71 - extern struct task_struct *alloc_task_struct_node(int node); 72 - 73 71 static inline void release_thread(struct task_struct *task) 74 72 { 75 73 }
+1 -1
arch/um/kernel/Makefile
··· 10 10 extra-y := vmlinux.lds 11 11 clean-files := 12 12 13 - obj-y = config.o exec.o exitcode.o init_task.o irq.o ksyms.o mem.o \ 13 + obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \ 14 14 physmem.o process.o ptrace.o reboot.o sigio.o \ 15 15 signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \ 16 16 um_arch.o umid.o skas/
-38
arch/um/kernel/init_task.c
··· 1 - /* 2 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,intel.linux}.com) 3 - * Licensed under the GPL 4 - */ 5 - 6 - #include "linux/sched.h" 7 - #include "linux/init_task.h" 8 - #include "linux/fs.h" 9 - #include "linux/module.h" 10 - #include "linux/mqueue.h" 11 - #include "asm/uaccess.h" 12 - 13 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 14 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 15 - /* 16 - * Initial task structure. 17 - * 18 - * All other task structs will be allocated on slabs in fork.c 19 - */ 20 - 21 - struct task_struct init_task = INIT_TASK(init_task); 22 - 23 - EXPORT_SYMBOL(init_task); 24 - 25 - /* 26 - * Initial thread structure. 27 - * 28 - * We need to make sure that this is aligned due to the 29 - * way process stacks are handled. This is done by having a special 30 - * "init_task" linker map entry.. 31 - */ 32 - 33 - union thread_union init_thread_union __init_task_data = 34 - { INIT_THREAD_INFO(init_task) }; 35 - 36 - union thread_union cpu0_irqstack 37 - __attribute__((__section__(".data..init_irqstack"))) = 38 - { INIT_THREAD_INFO(init_task) };
+1 -1
arch/um/kernel/smp.c
··· 140 140 set_cpu_online(smp_processor_id(), true); 141 141 } 142 142 143 - int __cpu_up(unsigned int cpu) 143 + int __cpu_up(unsigned int cpu, struct task_struct *tidle) 144 144 { 145 145 cpu_set(cpu, smp_commenced_mask); 146 146 while (!cpu_online(cpu))
+5
arch/um/kernel/um_arch.c
··· 10 10 #include <linux/seq_file.h> 11 11 #include <linux/string.h> 12 12 #include <linux/utsname.h> 13 + #include <linux/sched.h> 13 14 #include <asm/pgtable.h> 14 15 #include <asm/processor.h> 15 16 #include <asm/setup.h> ··· 47 46 .loops_per_jiffy = 0, 48 47 .ipi_pipe = { -1, -1 } 49 48 }; 49 + 50 + union thread_union cpu0_irqstack 51 + __attribute__((__section__(".data..init_irqstack"))) = 52 + { INIT_THREAD_INFO(init_task) }; 50 53 51 54 unsigned long thread_saved_pc(struct task_struct *task) 52 55 {
-1
arch/unicore32/Makefile
··· 33 33 CHECKFLAGS += -D__unicore32__ 34 34 35 35 head-y := arch/unicore32/kernel/head.o 36 - head-y += arch/unicore32/kernel/init_task.o 37 36 38 37 core-y += arch/unicore32/kernel/ 39 38 core-y += arch/unicore32/mm/
+1 -1
arch/unicore32/kernel/Makefile
··· 29 29 head-y := head.o 30 30 obj-$(CONFIG_DEBUG_LL) += debug.o 31 31 32 - extra-y := $(head-y) init_task.o vmlinux.lds 32 + extra-y := $(head-y) vmlinux.lds
-44
arch/unicore32/kernel/init_task.c
··· 1 - /* 2 - * linux/arch/unicore32/kernel/init_task.c 3 - * 4 - * Code specific to PKUnity SoC and UniCore ISA 5 - * 6 - * Copyright (C) 2001-2010 GUAN Xue-tao 7 - * 8 - * This program is free software; you can redistribute it and/or modify 9 - * it under the terms of the GNU General Public License version 2 as 10 - * published by the Free Software Foundation. 11 - */ 12 - #include <linux/mm.h> 13 - #include <linux/module.h> 14 - #include <linux/fs.h> 15 - #include <linux/sched.h> 16 - #include <linux/init.h> 17 - #include <linux/init_task.h> 18 - #include <linux/mqueue.h> 19 - #include <linux/uaccess.h> 20 - 21 - #include <asm/pgtable.h> 22 - 23 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 24 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 25 - /* 26 - * Initial thread structure. 27 - * 28 - * We need to make sure that this is 8192-byte aligned due to the 29 - * way process stacks are handled. This is done by making sure 30 - * the linker maps this in the .text segment right after head.S, 31 - * and making head.S ensure the proper alignment. 32 - * 33 - * The things we do for performance.. 34 - */ 35 - union thread_union init_thread_union __init_task_data = { 36 - INIT_THREAD_INFO(init_task) }; 37 - 38 - /* 39 - * Initial task structure. 40 - * 41 - * All other task structs will be allocated on slabs in fork.c 42 - */ 43 - struct task_struct init_task = INIT_TASK(init_task); 44 - EXPORT_SYMBOL(init_task);
+1 -3
arch/x86/Kconfig
··· 82 82 select ARCH_HAVE_NMI_SAFE_CMPXCHG 83 83 select GENERIC_IOMAP 84 84 select DCACHE_WORD_ACCESS 85 + select GENERIC_SMP_IDLE_THREAD 85 86 86 87 config INSTRUCTION_DECODER 87 88 def_bool (KPROBES || PERF_EVENTS) ··· 160 159 161 160 config RWSEM_XCHGADD_ALGORITHM 162 161 def_bool X86_XADD 163 - 164 - config ARCH_HAS_CPU_IDLE_WAIT 165 - def_bool y 166 162 167 163 config GENERIC_CALIBRATE_DELAY 168 164 def_bool y
-1
arch/x86/Makefile
··· 149 149 head-y := arch/x86/kernel/head_$(BITS).o 150 150 head-y += arch/x86/kernel/head$(BITS).o 151 151 head-y += arch/x86/kernel/head.o 152 - head-y += arch/x86/kernel/init_task.o 153 152 154 153 libs-y += arch/x86/lib/ 155 154
+1 -1
arch/x86/include/asm/boot.h
··· 19 19 #ifdef CONFIG_X86_64 20 20 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT 21 21 #else 22 - #define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER) 22 + #define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_SIZE_ORDER) 23 23 #endif 24 24 #define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2) 25 25
+2 -2
arch/x86/include/asm/page_32_types.h
··· 15 15 */ 16 16 #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) 17 17 18 - #define THREAD_ORDER 1 19 - #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 18 + #define THREAD_SIZE_ORDER 1 19 + #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 20 20 21 21 #define STACKFAULT_STACK 0 22 22 #define DOUBLEFAULT_STACK 1
+2 -2
arch/x86/include/asm/page_64_types.h
··· 1 1 #ifndef _ASM_X86_PAGE_64_DEFS_H 2 2 #define _ASM_X86_PAGE_64_DEFS_H 3 3 4 - #define THREAD_ORDER 1 5 - #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 4 + #define THREAD_SIZE_ORDER 1 5 + #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 6 6 #define CURRENT_MASK (~(THREAD_SIZE - 1)) 7 7 8 8 #define EXCEPTION_STACK_ORDER 0
-2
arch/x86/include/asm/processor.h
··· 974 974 #define cpu_has_amd_erratum(x) (false) 975 975 #endif /* CONFIG_CPU_SUP_AMD */ 976 976 977 - void cpu_idle_wait(void); 978 - 979 977 extern unsigned long arch_align_stack(unsigned long sp); 980 978 extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 981 979
+7 -4
arch/x86/include/asm/smp.h
··· 62 62 /* Static state in head.S used to set up a CPU */ 63 63 extern unsigned long stack_start; /* Initial stack pointer address */ 64 64 65 + struct task_struct; 66 + 65 67 struct smp_ops { 66 68 void (*smp_prepare_boot_cpu)(void); 67 69 void (*smp_prepare_cpus)(unsigned max_cpus); ··· 72 70 void (*stop_other_cpus)(int wait); 73 71 void (*smp_send_reschedule)(int cpu); 74 72 75 - int (*cpu_up)(unsigned cpu); 73 + int (*cpu_up)(unsigned cpu, struct task_struct *tidle); 76 74 int (*cpu_disable)(void); 77 75 void (*cpu_die)(unsigned int cpu); 78 76 void (*play_dead)(void); ··· 115 113 smp_ops.smp_cpus_done(max_cpus); 116 114 } 117 115 118 - static inline int __cpu_up(unsigned int cpu) 116 + static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle) 119 117 { 120 - return smp_ops.cpu_up(cpu); 118 + return smp_ops.cpu_up(cpu, tidle); 121 119 } 122 120 123 121 static inline int __cpu_disable(void) ··· 154 152 void native_smp_prepare_boot_cpu(void); 155 153 void native_smp_prepare_cpus(unsigned int max_cpus); 156 154 void native_smp_cpus_done(unsigned int max_cpus); 157 - int native_cpu_up(unsigned int cpunum); 155 + int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); 158 156 int native_cpu_disable(void); 159 157 void native_cpu_die(unsigned int cpu); 160 158 void native_play_dead(void); ··· 164 162 165 163 void native_send_call_func_ipi(const struct cpumask *mask); 166 164 void native_send_call_func_single_ipi(int cpu); 165 + void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle); 167 166 168 167 void smp_store_cpu_info(int id); 169 168 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
+1 -20
arch/x86/include/asm/thread_info.h
··· 155 155 156 156 #define PREEMPT_ACTIVE 0x10000000 157 157 158 - /* thread information allocation */ 159 - #ifdef CONFIG_DEBUG_STACK_USAGE 160 - #define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) 161 - #else 162 - #define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK) 163 - #endif 164 - 165 - #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 166 - 167 - #define alloc_thread_info_node(tsk, node) \ 168 - ({ \ 169 - struct page *page = alloc_pages_node(node, THREAD_FLAGS, \ 170 - THREAD_ORDER); \ 171 - struct thread_info *ret = page ? page_address(page) : NULL; \ 172 - \ 173 - ret; \ 174 - }) 175 - 176 158 #ifdef CONFIG_X86_32 177 159 178 160 #define STACK_WARN (THREAD_SIZE/8) ··· 264 282 265 283 #ifndef __ASSEMBLY__ 266 284 extern void arch_task_cache_init(void); 267 - extern void free_thread_info(struct thread_info *ti); 268 285 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 269 - #define arch_task_cache_init arch_task_cache_init 286 + extern void arch_release_task_struct(struct task_struct *tsk); 270 287 #endif 271 288 #endif /* _ASM_X86_THREAD_INFO_H */
+1 -1
arch/x86/kernel/Makefile
··· 2 2 # Makefile for the linux kernel. 3 3 # 4 4 5 - extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinux.lds 5 + extra-y := head_$(BITS).o head$(BITS).o head.o vmlinux.lds 6 6 7 7 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) 8 8
+1 -1
arch/x86/kernel/apm_32.c
··· 2401 2401 * (pm_idle), Wait for all processors to update cached/local 2402 2402 * copies of pm_idle before proceeding. 2403 2403 */ 2404 - cpu_idle_wait(); 2404 + kick_all_cpus_sync(); 2405 2405 } 2406 2406 if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0) 2407 2407 && (apm_info.connection_version > 0x0100)) {
-42
arch/x86/kernel/init_task.c
··· 1 - #include <linux/mm.h> 2 - #include <linux/module.h> 3 - #include <linux/sched.h> 4 - #include <linux/init.h> 5 - #include <linux/init_task.h> 6 - #include <linux/fs.h> 7 - #include <linux/mqueue.h> 8 - 9 - #include <asm/uaccess.h> 10 - #include <asm/pgtable.h> 11 - #include <asm/desc.h> 12 - 13 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 14 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 15 - 16 - /* 17 - * Initial thread structure. 18 - * 19 - * We need to make sure that this is THREAD_SIZE aligned due to the 20 - * way process stacks are handled. This is done by having a special 21 - * "init_task" linker map entry.. 22 - */ 23 - union thread_union init_thread_union __init_task_data = 24 - { INIT_THREAD_INFO(init_task) }; 25 - 26 - /* 27 - * Initial task structure. 28 - * 29 - * All other task structs will be allocated on slabs in fork.c 30 - */ 31 - struct task_struct init_task = INIT_TASK(init_task); 32 - EXPORT_SYMBOL(init_task); 33 - 34 - /* 35 - * per-CPU TSS segments. Threads are completely 'soft' on Linux, 36 - * no more per-task TSS's. The TSS size is kept cacheline-aligned 37 - * so they are allowed to end up in the .data..cacheline_aligned 38 - * section. Since TSS's are completely CPU-local, we want them 39 - * on exact cacheline boundaries, to eliminate cacheline ping-pong. 40 - */ 41 - DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; 42 -
+4 -4
arch/x86/kernel/irq_32.c
··· 127 127 return; 128 128 129 129 irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), 130 - THREAD_FLAGS, 131 - THREAD_ORDER)); 130 + THREADINFO_GFP, 131 + THREAD_SIZE_ORDER)); 132 132 memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); 133 133 irqctx->tinfo.cpu = cpu; 134 134 irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; ··· 137 137 per_cpu(hardirq_ctx, cpu) = irqctx; 138 138 139 139 irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), 140 - THREAD_FLAGS, 141 - THREAD_ORDER)); 140 + THREADINFO_GFP, 141 + THREAD_SIZE_ORDER)); 142 142 memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); 143 143 irqctx->tinfo.cpu = cpu; 144 144 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+11 -23
arch/x86/kernel/process.c
··· 27 27 #include <asm/debugreg.h> 28 28 #include <asm/nmi.h> 29 29 30 + /* 31 + * per-CPU TSS segments. Threads are completely 'soft' on Linux, 32 + * no more per-task TSS's. The TSS size is kept cacheline-aligned 33 + * so they are allowed to end up in the .data..cacheline_aligned 34 + * section. Since TSS's are completely CPU-local, we want them 35 + * on exact cacheline boundaries, to eliminate cacheline ping-pong. 36 + */ 37 + DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; 38 + 30 39 #ifdef CONFIG_X86_64 31 40 static DEFINE_PER_CPU(unsigned char, is_idle); 32 41 static ATOMIC_NOTIFIER_HEAD(idle_notifier); ··· 76 67 fpu_free(&tsk->thread.fpu); 77 68 } 78 69 79 - void free_thread_info(struct thread_info *ti) 70 + void arch_release_task_struct(struct task_struct *tsk) 80 71 { 81 - free_thread_xstate(ti->task); 82 - free_pages((unsigned long)ti, THREAD_ORDER); 72 + free_thread_xstate(tsk); 83 73 } 84 74 85 75 void arch_task_cache_init(void) ··· 523 515 halt(); 524 516 } 525 517 } 526 - 527 - static void do_nothing(void *unused) 528 - { 529 - } 530 - 531 - /* 532 - * cpu_idle_wait - Used to ensure that all the CPUs discard old value of 533 - * pm_idle and update to new pm_idle value. Required while changing pm_idle 534 - * handler on SMP systems. 535 - * 536 - * Caller must have changed pm_idle to the new value before the call. Old 537 - * pm_idle value will not be used by any CPU after the return of this function. 538 - */ 539 - void cpu_idle_wait(void) 540 - { 541 - smp_mb(); 542 - /* kick all the CPUs so that they exit out of pm_idle */ 543 - smp_call_function(do_nothing, NULL, 1); 544 - } 545 - EXPORT_SYMBOL_GPL(cpu_idle_wait); 546 518 547 519 /* Default MONITOR/MWAIT with no hints, used for default C1 state */ 548 520 static void mwait_idle(void)
+11 -72
arch/x86/kernel/smpboot.c
··· 76 76 /* State of each CPU */ 77 77 DEFINE_PER_CPU(int, cpu_state) = { 0 }; 78 78 79 - /* Store all idle threads, this can be reused instead of creating 80 - * a new thread. Also avoids complicated thread destroy functionality 81 - * for idle threads. 82 - */ 83 79 #ifdef CONFIG_HOTPLUG_CPU 84 - /* 85 - * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is 86 - * removed after init for !CONFIG_HOTPLUG_CPU. 87 - */ 88 - static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); 89 - #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) 90 - #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) 91 - 92 80 /* 93 81 * We need this for trampoline_base protection from concurrent accesses when 94 82 * off- and onlining cores wildly. ··· 85 97 86 98 void cpu_hotplug_driver_lock(void) 87 99 { 88 - mutex_lock(&x86_cpu_hotplug_driver_mutex); 100 + mutex_lock(&x86_cpu_hotplug_driver_mutex); 89 101 } 90 102 91 103 void cpu_hotplug_driver_unlock(void) 92 104 { 93 - mutex_unlock(&x86_cpu_hotplug_driver_mutex); 105 + mutex_unlock(&x86_cpu_hotplug_driver_mutex); 94 106 } 95 107 96 108 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; } 97 109 ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; } 98 - #else 99 - static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; 100 - #define get_idle_for_cpu(x) (idle_thread_array[(x)]) 101 - #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) 102 110 #endif 103 111 104 112 /* Number of siblings per CPU package */ ··· 602 618 return (send_status | accept_status); 603 619 } 604 620 605 - struct create_idle { 606 - struct work_struct work; 607 - struct task_struct *idle; 608 - struct completion done; 609 - int cpu; 610 - }; 611 - 612 - static void __cpuinit do_fork_idle(struct work_struct *work) 613 - { 614 - struct create_idle *c_idle = 615 - container_of(work, struct create_idle, work); 616 - 617 - c_idle->idle = fork_idle(c_idle->cpu); 618 - 
complete(&c_idle->done); 619 - } 620 - 621 621 /* reduce the number of lines printed when booting a large cpu count system */ 622 622 static void __cpuinit announce_cpu(int cpu, int apicid) 623 623 { ··· 628 660 * Returns zero if CPU booted OK, else error code from 629 661 * ->wakeup_secondary_cpu. 630 662 */ 631 - static int __cpuinit do_boot_cpu(int apicid, int cpu) 663 + static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) 632 664 { 633 665 unsigned long boot_error = 0; 634 666 unsigned long start_ip; 635 667 int timeout; 636 - struct create_idle c_idle = { 637 - .cpu = cpu, 638 - .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 639 - }; 640 - 641 - INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); 642 668 643 669 alternatives_smp_switch(1); 644 670 645 - c_idle.idle = get_idle_for_cpu(cpu); 671 + idle->thread.sp = (unsigned long) (((struct pt_regs *) 672 + (THREAD_SIZE + task_stack_page(idle))) - 1); 673 + per_cpu(current_task, cpu) = idle; 646 674 647 - /* 648 - * We can't use kernel_thread since we must avoid to 649 - * reschedule the child. 
650 - */ 651 - if (c_idle.idle) { 652 - c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *) 653 - (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); 654 - init_idle(c_idle.idle, cpu); 655 - goto do_rest; 656 - } 657 - 658 - schedule_work(&c_idle.work); 659 - wait_for_completion(&c_idle.done); 660 - 661 - if (IS_ERR(c_idle.idle)) { 662 - printk("failed fork for CPU %d\n", cpu); 663 - destroy_work_on_stack(&c_idle.work); 664 - return PTR_ERR(c_idle.idle); 665 - } 666 - 667 - set_idle_for_cpu(cpu, c_idle.idle); 668 - do_rest: 669 - per_cpu(current_task, cpu) = c_idle.idle; 670 675 #ifdef CONFIG_X86_32 671 676 /* Stack for startup_32 can be just as for start_secondary onwards */ 672 677 irq_ctx_init(cpu); 673 678 #else 674 - clear_tsk_thread_flag(c_idle.idle, TIF_FORK); 679 + clear_tsk_thread_flag(idle, TIF_FORK); 675 680 initial_gs = per_cpu_offset(cpu); 676 681 per_cpu(kernel_stack, cpu) = 677 - (unsigned long)task_stack_page(c_idle.idle) - 682 + (unsigned long)task_stack_page(idle) - 678 683 KERNEL_STACK_OFFSET + THREAD_SIZE; 679 684 #endif 680 685 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); 681 686 initial_code = (unsigned long)start_secondary; 682 - stack_start = c_idle.idle->thread.sp; 687 + stack_start = idle->thread.sp; 683 688 684 689 /* start_ip had better be page-aligned! 
*/ 685 690 start_ip = trampoline_address(); ··· 754 813 */ 755 814 smpboot_restore_warm_reset_vector(); 756 815 } 757 - 758 - destroy_work_on_stack(&c_idle.work); 759 816 return boot_error; 760 817 } 761 818 762 - int __cpuinit native_cpu_up(unsigned int cpu) 819 + int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle) 763 820 { 764 821 int apicid = apic->cpu_present_to_apicid(cpu); 765 822 unsigned long flags; ··· 790 851 791 852 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; 792 853 793 - err = do_boot_cpu(apicid, cpu); 854 + err = do_boot_cpu(apicid, cpu, tidle); 794 855 if (err) { 795 856 pr_debug("do_boot_cpu failed %d\n", err); 796 857 return -EIO;
+4 -15
arch/x86/xen/smp.c
··· 265 265 set_cpu_possible(cpu, false); 266 266 } 267 267 268 - for_each_possible_cpu (cpu) { 269 - struct task_struct *idle; 270 - 271 - if (cpu == 0) 272 - continue; 273 - 274 - idle = fork_idle(cpu); 275 - if (IS_ERR(idle)) 276 - panic("failed fork for CPU %d", cpu); 277 - 268 + for_each_possible_cpu(cpu) 278 269 set_cpu_present(cpu, true); 279 - } 280 270 } 281 271 282 272 static int __cpuinit ··· 336 346 return 0; 337 347 } 338 348 339 - static int __cpuinit xen_cpu_up(unsigned int cpu) 349 + static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle) 340 350 { 341 - struct task_struct *idle = idle_task(cpu); 342 351 int rc; 343 352 344 353 per_cpu(current_task, cpu) = idle; ··· 551 562 xen_init_lock_cpu(0); 552 563 } 553 564 554 - static int __cpuinit xen_hvm_cpu_up(unsigned int cpu) 565 + static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) 555 566 { 556 567 int rc; 557 - rc = native_cpu_up(cpu); 568 + rc = native_cpu_up(cpu, tidle); 558 569 WARN_ON (xen_smp_intr_init(cpu)); 559 570 return rc; 560 571 }
+1 -1
arch/xtensa/kernel/Makefile
··· 6 6 7 7 obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \ 8 8 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ 9 - pci-dma.o init_task.o io.o 9 + pci-dma.o io.o 10 10 11 11 obj-$(CONFIG_KGDB) += xtensa-stub.o 12 12 obj-$(CONFIG_PCI) += pci.o
-31
arch/xtensa/kernel/init_task.c
··· 1 - /* 2 - * arch/xtensa/kernel/init_task.c 3 - * 4 - * Xtensa Processor version. 5 - * 6 - * This file is subject to the terms and conditions of the GNU General Public 7 - * License. See the file "COPYING" in the main directory of this archive 8 - * for more details. 9 - * 10 - * Copyright (C) 2007 Tensilica Inc. 11 - * 12 - * Chris Zankel <chris@zankel.net> 13 - */ 14 - 15 - #include <linux/mm.h> 16 - #include <linux/fs.h> 17 - #include <linux/init.h> 18 - #include <linux/init_task.h> 19 - #include <linux/module.h> 20 - #include <linux/mqueue.h> 21 - 22 - #include <asm/uaccess.h> 23 - 24 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 25 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 26 - union thread_union init_thread_union __init_task_data = 27 - { INIT_THREAD_INFO(init_task) }; 28 - 29 - struct task_struct init_task = INIT_TASK(init_task); 30 - 31 - EXPORT_SYMBOL(init_task);
+1 -12
drivers/cpuidle/cpuidle.c
··· 40 40 off = 1; 41 41 } 42 42 43 - #if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT) 44 - static void cpuidle_kick_cpus(void) 45 - { 46 - cpu_idle_wait(); 47 - } 48 - #elif defined(CONFIG_SMP) 49 - # error "Arch needs cpu_idle_wait() equivalent here" 50 - #else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */ 51 - static void cpuidle_kick_cpus(void) {} 52 - #endif 53 - 54 43 static int __cpuidle_register_device(struct cpuidle_device *dev); 55 44 56 45 static inline int cpuidle_enter(struct cpuidle_device *dev, ··· 175 186 { 176 187 if (enabled_devices) { 177 188 initialized = 0; 178 - cpuidle_kick_cpus(); 189 + kick_all_cpus_sync(); 179 190 } 180 191 } 181 192
+5 -1
include/linux/smp.h
··· 61 61 /* 62 62 * Bring a CPU up 63 63 */ 64 - extern int __cpu_up(unsigned int cpunum); 64 + extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle); 65 65 66 66 /* 67 67 * Final polishing of CPUs ··· 80 80 81 81 int smp_call_function_any(const struct cpumask *mask, 82 82 smp_call_func_t func, void *info, int wait); 83 + 84 + void kick_all_cpus_sync(void); 83 85 84 86 /* 85 87 * Generic and arch helpers ··· 193 191 { 194 192 return smp_call_function_single(0, func, info, wait); 195 193 } 194 + 195 + static inline void kick_all_cpus_sync(void) { } 196 196 197 197 #endif /* !SMP */ 198 198
+6
include/linux/thread_info.h
··· 54 54 55 55 #ifdef __KERNEL__ 56 56 57 + #ifdef CONFIG_DEBUG_STACK_USAGE 58 + # define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) 59 + #else 60 + # define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK) 61 + #endif 62 + 57 63 /* 58 64 * flag set/clear/test wrappers 59 65 * - pass TIF_xxxx constants to these functions
+4
init/Makefile
··· 10 10 endif 11 11 obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o 12 12 13 + ifneq ($(CONFIG_ARCH_INIT_TASK),y) 14 + obj-y += init_task.o 15 + endif 16 + 13 17 mounts-y := do_mounts.o 14 18 mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o 15 19 mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o
+1
kernel/Makefile
··· 43 43 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o 44 44 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o 45 45 obj-$(CONFIG_SMP) += smp.o 46 + obj-$(CONFIG_SMP) += smpboot.o 46 47 ifneq ($(CONFIG_SMP),y) 47 48 obj-y += up.o 48 49 endif
+12 -1
kernel/cpu.c
··· 17 17 #include <linux/gfp.h> 18 18 #include <linux/suspend.h> 19 19 20 + #include "smpboot.h" 21 + 20 22 #ifdef CONFIG_SMP 21 23 /* Serializes the updates to cpu_online_mask, cpu_present_mask */ 22 24 static DEFINE_MUTEX(cpu_add_remove_lock); ··· 297 295 int ret, nr_calls = 0; 298 296 void *hcpu = (void *)(long)cpu; 299 297 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 298 + struct task_struct *idle; 300 299 301 300 if (cpu_online(cpu) || !cpu_present(cpu)) 302 301 return -EINVAL; 303 302 304 303 cpu_hotplug_begin(); 304 + 305 + idle = idle_thread_get(cpu); 306 + if (IS_ERR(idle)) { 307 + ret = PTR_ERR(idle); 308 + goto out; 309 + } 310 + 305 311 ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); 306 312 if (ret) { 307 313 nr_calls--; ··· 319 309 } 320 310 321 311 /* Arch-specific enabling code. */ 322 - ret = __cpu_up(cpu); 312 + ret = __cpu_up(cpu, idle); 323 313 if (ret != 0) 324 314 goto out_notify; 325 315 BUG_ON(!cpu_online(cpu)); ··· 330 320 out_notify: 331 321 if (ret != 0) 332 322 __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL); 323 + out: 333 324 cpu_hotplug_done(); 334 325 335 326 return ret;
+49 -20
kernel/fork.c
··· 112 112 return total; 113 113 } 114 114 115 - #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 116 - # define alloc_task_struct_node(node) \ 117 - kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node) 118 - # define free_task_struct(tsk) \ 119 - kmem_cache_free(task_struct_cachep, (tsk)) 115 + #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 120 116 static struct kmem_cache *task_struct_cachep; 117 + 118 + static inline struct task_struct *alloc_task_struct_node(int node) 119 + { 120 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); 121 + } 122 + 123 + void __weak arch_release_task_struct(struct task_struct *tsk) { } 124 + 125 + static inline void free_task_struct(struct task_struct *tsk) 126 + { 127 + arch_release_task_struct(tsk); 128 + kmem_cache_free(task_struct_cachep, tsk); 129 + } 121 130 #endif 122 131 123 - #ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR 132 + #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR 133 + void __weak arch_release_thread_info(struct thread_info *ti) { } 134 + 135 + /* 136 + * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a 137 + * kmemcache based allocator. 138 + */ 139 + # if THREAD_SIZE >= PAGE_SIZE 124 140 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, 125 141 int node) 126 142 { 127 - #ifdef CONFIG_DEBUG_STACK_USAGE 128 - gfp_t mask = GFP_KERNEL | __GFP_ZERO; 129 - #else 130 - gfp_t mask = GFP_KERNEL; 131 - #endif 132 - struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER); 143 + struct page *page = alloc_pages_node(node, THREADINFO_GFP, 144 + THREAD_SIZE_ORDER); 133 145 134 146 return page ? 
page_address(page) : NULL; 135 147 } 136 148 137 149 static inline void free_thread_info(struct thread_info *ti) 138 150 { 151 + arch_release_thread_info(ti); 139 152 free_pages((unsigned long)ti, THREAD_SIZE_ORDER); 140 153 } 154 + # else 155 + static struct kmem_cache *thread_info_cache; 156 + 157 + static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, 158 + int node) 159 + { 160 + return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node); 161 + } 162 + 163 + static void free_thread_info(struct thread_info *ti) 164 + { 165 + arch_release_thread_info(ti); 166 + kmem_cache_free(thread_info_cache, ti); 167 + } 168 + 169 + void thread_info_cache_init(void) 170 + { 171 + thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, 172 + THREAD_SIZE, 0, NULL); 173 + BUG_ON(thread_info_cache == NULL); 174 + } 175 + # endif 141 176 #endif 142 177 143 178 /* SLAB cache for signal_struct structures (tsk->signal) */ ··· 239 204 } 240 205 EXPORT_SYMBOL_GPL(__put_task_struct); 241 206 242 - /* 243 - * macro override instead of weak attribute alias, to workaround 244 - * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions. 245 - */ 246 - #ifndef arch_task_cache_init 247 - #define arch_task_cache_init() 248 - #endif 207 + void __init __weak arch_task_cache_init(void) { } 249 208 250 209 void __init fork_init(unsigned long mempages) 251 210 { 252 - #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 211 + #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 253 212 #ifndef ARCH_MIN_TASKALIGN 254 213 #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES 255 214 #endif
-2
kernel/sched/Makefile
··· 16 16 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o 17 17 obj-$(CONFIG_SCHEDSTATS) += stats.o 18 18 obj-$(CONFIG_SCHED_DEBUG) += debug.o 19 - 20 -
+2
kernel/sched/core.c
··· 83 83 84 84 #include "sched.h" 85 85 #include "../workqueue_sched.h" 86 + #include "../smpboot.h" 86 87 87 88 #define CREATE_TRACE_POINTS 88 89 #include <trace/events/sched.h> ··· 7063 7062 /* May be allocated at isolcpus cmdline parse time */ 7064 7063 if (cpu_isolated_map == NULL) 7065 7064 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 7065 + idle_thread_set_boot_cpu(); 7066 7066 #endif 7067 7067 init_sched_fair_class(); 7068 7068
+27
kernel/smp.c
··· 13 13 #include <linux/smp.h> 14 14 #include <linux/cpu.h> 15 15 16 + #include "smpboot.h" 17 + 16 18 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS 17 19 static struct { 18 20 struct list_head queue; ··· 671 669 { 672 670 unsigned int cpu; 673 671 672 + idle_threads_init(); 673 + 674 674 /* FIXME: This should be done in userspace --RR */ 675 675 for_each_present_cpu(cpu) { 676 676 if (num_online_cpus() >= setup_max_cpus) ··· 795 791 } 796 792 } 797 793 EXPORT_SYMBOL(on_each_cpu_cond); 794 + 795 + static void do_nothing(void *unused) 796 + { 797 + } 798 + 799 + /** 800 + * kick_all_cpus_sync - Force all cpus out of idle 801 + * 802 + * Used to synchronize the update of pm_idle function pointer. It's 803 + * called after the pointer is updated and returns after the dummy 804 + * callback function has been executed on all cpus. The execution of 805 + * the function can only happen on the remote cpus after they have 806 + * left the idle function which had been called via pm_idle function 807 + * pointer. So it's guaranteed that nothing uses the previous pointer 808 + * anymore. 809 + */ 810 + void kick_all_cpus_sync(void) 811 + { 812 + /* Make sure the change is visible before we kick the cpus */ 813 + smp_mb(); 814 + smp_call_function(do_nothing, NULL, 1); 815 + } 816 + EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
+62
kernel/smpboot.c
··· 1 + /* 2 + * Common SMP CPU bringup/teardown functions 3 + */ 4 + #include <linux/err.h> 5 + #include <linux/smp.h> 6 + #include <linux/init.h> 7 + #include <linux/sched.h> 8 + #include <linux/percpu.h> 9 + 10 + #include "smpboot.h" 11 + 12 + #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD 13 + /* 14 + * For the hotplug case we keep the task structs around and reuse 15 + * them. 16 + */ 17 + static DEFINE_PER_CPU(struct task_struct *, idle_threads); 18 + 19 + struct task_struct * __cpuinit idle_thread_get(unsigned int cpu) 20 + { 21 + struct task_struct *tsk = per_cpu(idle_threads, cpu); 22 + 23 + if (!tsk) 24 + return ERR_PTR(-ENOMEM); 25 + init_idle(tsk, cpu); 26 + return tsk; 27 + } 28 + 29 + void __init idle_thread_set_boot_cpu(void) 30 + { 31 + per_cpu(idle_threads, smp_processor_id()) = current; 32 + } 33 + 34 + static inline void idle_init(unsigned int cpu) 35 + { 36 + struct task_struct *tsk = per_cpu(idle_threads, cpu); 37 + 38 + if (!tsk) { 39 + tsk = fork_idle(cpu); 40 + if (IS_ERR(tsk)) 41 + pr_err("SMP: fork_idle() failed for CPU %u\n", cpu); 42 + else 43 + per_cpu(idle_threads, cpu) = tsk; 44 + } 45 + } 46 + 47 + /** 48 + * idle_thread_init - Initialize the idle thread for a cpu 49 + * @cpu: The cpu for which the idle thread should be initialized 50 + * 51 + * Creates the thread if it does not exist. 52 + */ 53 + void __init idle_threads_init(void) 54 + { 55 + unsigned int cpu; 56 + 57 + for_each_possible_cpu(cpu) { 58 + if (cpu != smp_processor_id()) 59 + idle_init(cpu); 60 + } 61 + } 62 + #endif
+18
kernel/smpboot.h
··· 1 + #ifndef SMPBOOT_H 2 + #define SMPBOOT_H 3 + 4 + struct task_struct; 5 + 6 + int smpboot_prepare(unsigned int cpu); 7 + 8 + #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD 9 + struct task_struct *idle_thread_get(unsigned int cpu); 10 + void idle_thread_set_boot_cpu(void); 11 + void idle_threads_init(void); 12 + #else 13 + static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; } 14 + static inline void idle_thread_set_boot_cpu(void) { } 15 + static inline void idle_threads_init(void) { } 16 + #endif 17 + 18 + #endif