Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
work_on_cpu(): rewrite it to create a kernel thread on demand
kthread: move sched-related initialization from kthreadd context
kthread: Don't look for a task in create_kthread() #2

+30 -30
+12 -14
kernel/kthread.c
··· 76 76 77 77 /* OK, tell user we're spawned, wait for stop or wakeup */ 78 78 __set_current_state(TASK_UNINTERRUPTIBLE); 79 + create->result = current; 79 80 complete(&create->started); 80 81 schedule(); 81 82 ··· 97 96 98 97 /* We want our own signal handler (we take no signals by default). */ 99 98 pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); 100 - if (pid < 0) { 99 + if (pid < 0) 101 100 create->result = ERR_PTR(pid); 102 - } else { 103 - struct sched_param param = { .sched_priority = 0 }; 101 + else 104 102 wait_for_completion(&create->started); 105 - read_lock(&tasklist_lock); 106 - create->result = find_task_by_pid_ns(pid, &init_pid_ns); 107 - read_unlock(&tasklist_lock); 108 - /* 109 - * root may have changed our (kthreadd's) priority or CPU mask. 110 - * The kernel thread should not inherit these properties. 111 - */ 112 - sched_setscheduler(create->result, SCHED_NORMAL, &param); 113 - set_user_nice(create->result, KTHREAD_NICE_LEVEL); 114 - set_cpus_allowed_ptr(create->result, cpu_all_mask); 115 - } 116 103 complete(&create->done); 117 104 } 118 105 ··· 143 154 wait_for_completion(&create.done); 144 155 145 156 if (!IS_ERR(create.result)) { 157 + struct sched_param param = { .sched_priority = 0 }; 146 158 va_list args; 159 + 147 160 va_start(args, namefmt); 148 161 vsnprintf(create.result->comm, sizeof(create.result->comm), 149 162 namefmt, args); 150 163 va_end(args); 164 + /* 165 + * root may have changed our (kthreadd's) priority or CPU mask. 166 + * The kernel thread should not inherit these properties. 167 + */ 168 + sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param); 169 + set_user_nice(create.result, KTHREAD_NICE_LEVEL); 170 + set_cpus_allowed_ptr(create.result, cpu_all_mask); 151 171 } 152 172 return create.result; 153 173 }
+18 -16
kernel/workqueue.c
··· 966 966 } 967 967 968 968 #ifdef CONFIG_SMP 969 - static struct workqueue_struct *work_on_cpu_wq __read_mostly; 970 969 971 970 struct work_for_cpu { 972 - struct work_struct work; 971 + struct completion completion; 973 972 long (*fn)(void *); 974 973 void *arg; 975 974 long ret; 976 975 }; 977 976 978 - static void do_work_for_cpu(struct work_struct *w) 977 + static int do_work_for_cpu(void *_wfc) 979 978 { 980 - struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work); 981 - 979 + struct work_for_cpu *wfc = _wfc; 982 980 wfc->ret = wfc->fn(wfc->arg); 981 + complete(&wfc->completion); 982 + return 0; 983 983 } 984 984 985 985 /** ··· 990 990 * 991 991 * This will return the value @fn returns. 992 992 * It is up to the caller to ensure that the cpu doesn't go offline. 993 + * The caller must not hold any locks which would prevent @fn from completing. 993 994 */ 994 995 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 995 996 { 996 - struct work_for_cpu wfc; 997 + struct task_struct *sub_thread; 998 + struct work_for_cpu wfc = { 999 + .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), 1000 + .fn = fn, 1001 + .arg = arg, 1002 + }; 997 1003 998 - INIT_WORK(&wfc.work, do_work_for_cpu); 999 - wfc.fn = fn; 1000 - wfc.arg = arg; 1001 - queue_work_on(cpu, work_on_cpu_wq, &wfc.work); 1002 - flush_work(&wfc.work); 1003 - 1004 + sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); 1005 + if (IS_ERR(sub_thread)) 1006 + return PTR_ERR(sub_thread); 1007 + kthread_bind(sub_thread, cpu); 1008 + wake_up_process(sub_thread); 1009 + wait_for_completion(&wfc.completion); 1004 1010 return wfc.ret; 1005 1011 } 1006 1012 EXPORT_SYMBOL_GPL(work_on_cpu); ··· 1022 1016 hotcpu_notifier(workqueue_cpu_callback, 0); 1023 1017 keventd_wq = create_workqueue("events"); 1024 1018 BUG_ON(!keventd_wq); 1025 - #ifdef CONFIG_SMP 1026 - work_on_cpu_wq = create_workqueue("work_on_cpu"); 1027 - BUG_ON(!work_on_cpu_wq); 1028 - #endif 1029 1019 }