Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  work_on_cpu(): rewrite it to create a kernel thread on demand
  kthread: move sched-related initialization from kthreadd context
  kthread: Don't look for a task in create_kthread() #2

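work_on_cpu() keeps its old signature, so existing callers need no changes.
As a caller-side illustration only (the callback and CPU number below are
hypothetical, not part of this merge), usage still looks like:

    #include <linux/smp.h>
    #include <linux/workqueue.h>

    /* Hypothetical callback: report which CPU the helper thread ran on. */
    static long which_cpu(void *arg)
    {
            return raw_smp_processor_id();
    }

    static long example(void)
    {
            /*
             * Runs which_cpu() in a freshly created kernel thread bound to
             * CPU 1; returns the callback's value, or -errno if the thread
             * could not be created.
             */
            return work_on_cpu(1, which_cpu, NULL);
    }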
 kernel/kthread.c   | 26 ++++++++++++--------------
 kernel/workqueue.c | 34 ++++++++++++++++++----------------
 2 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/kernel/kthread.c b/kernel/kthread.c
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -76,6 +76,7 @@
 
 	/* OK, tell user we're spawned, wait for stop or wakeup */
 	__set_current_state(TASK_UNINTERRUPTIBLE);
+	create->result = current;
 	complete(&create->started);
 	schedule();
 
@@ -97,21 +96,9 @@
 
 	/* We want our own signal handler (we take no signals by default). */
 	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
-	if (pid < 0) {
+	if (pid < 0)
 		create->result = ERR_PTR(pid);
-	} else {
-		struct sched_param param = { .sched_priority = 0 };
+	else
 		wait_for_completion(&create->started);
-		read_lock(&tasklist_lock);
-		create->result = find_task_by_pid_ns(pid, &init_pid_ns);
-		read_unlock(&tasklist_lock);
-		/*
-		 * root may have changed our (kthreadd's) priority or CPU mask.
-		 * The kernel thread should not inherit these properties.
-		 */
-		sched_setscheduler(create->result, SCHED_NORMAL, &param);
-		set_user_nice(create->result, KTHREAD_NICE_LEVEL);
-		set_cpus_allowed_ptr(create->result, cpu_all_mask);
-	}
 	complete(&create->done);
 }
@@ -143,11 +154,20 @@
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
+		struct sched_param param = { .sched_priority = 0 };
 		va_list args;
+
 		va_start(args, namefmt);
 		vsnprintf(create.result->comm, sizeof(create.result->comm),
 			  namefmt, args);
 		va_end(args);
+		/*
+		 * root may have changed our (kthreadd's) priority or CPU mask.
+		 * The kernel thread should not inherit these properties.
+		 */
+		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
+		set_user_nice(create.result, KTHREAD_NICE_LEVEL);
+		set_cpus_allowed_ptr(create.result, cpu_all_mask);
 	}
 	return create.result;
 }
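Net effect of the kthread.c hunks above: the new thread now publishes its own
task_struct through create->result, so kthread_create() no longer has to look
the task up by PID under tasklist_lock, and the SCHED_NORMAL/affinity reset is
applied by kthread_create() itself rather than from kthreadd's context. The
calling convention is unchanged; a typical caller (thread function and names
hypothetical) still looks like:

    #include <linux/err.h>
    #include <linux/kthread.h>

    static int my_thread_fn(void *data)
    {
            /* ... work until kthread_should_stop() returns true ... */
            return 0;
    }

    static int start_worker(void *data, unsigned int cpu)
    {
            struct task_struct *tsk;

            tsk = kthread_create(my_thread_fn, data, "my_worker/%u", cpu);
            if (IS_ERR(tsk))
                    return PTR_ERR(tsk);
            /*
             * tsk is already SCHED_NORMAL with a full CPU mask here, even if
             * root has tuned kthreadd's priority or affinity.
             */
            kthread_bind(tsk, cpu);
            wake_up_process(tsk);
            return 0;
    }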
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -966,20 +966,20 @@
 }
 
 #ifdef CONFIG_SMP
-static struct workqueue_struct *work_on_cpu_wq __read_mostly;
 
 struct work_for_cpu {
-	struct work_struct work;
+	struct completion completion;
 	long (*fn)(void *);
 	void *arg;
 	long ret;
 };
 
-static void do_work_for_cpu(struct work_struct *w)
+static int do_work_for_cpu(void *_wfc)
 {
-	struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
-
+	struct work_for_cpu *wfc = _wfc;
 	wfc->ret = wfc->fn(wfc->arg);
+	complete(&wfc->completion);
+	return 0;
 }
 
 /**
@@ -990,17 +990,23 @@
  *
  * This will return the value @fn returns.
  * It is up to the caller to ensure that the cpu doesn't go offline.
+ * The caller must not hold any locks which would prevent @fn from completing.
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-	struct work_for_cpu wfc;
+	struct task_struct *sub_thread;
+	struct work_for_cpu wfc = {
+		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
+		.fn = fn,
+		.arg = arg,
+	};
 
-	INIT_WORK(&wfc.work, do_work_for_cpu);
-	wfc.fn = fn;
-	wfc.arg = arg;
-	queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
-	flush_work(&wfc.work);
-
+	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
+	if (IS_ERR(sub_thread))
+		return PTR_ERR(sub_thread);
+	kthread_bind(sub_thread, cpu);
+	wake_up_process(sub_thread);
+	wait_for_completion(&wfc.completion);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
@@ -1022,8 +1016,4 @@
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);
-#ifdef CONFIG_SMP
-	work_on_cpu_wq = create_workqueue("work_on_cpu");
-	BUG_ON(!work_on_cpu_wq);
-#endif
 }
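The kerneldoc line added above deserves a concrete illustration: work_on_cpu()
now blocks in wait_for_completion() until @fn finishes, so holding a lock that
@fn needs (directly or indirectly) deadlocks. A hypothetical anti-example (the
lock and callback names are invented):

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    static DEFINE_MUTEX(my_lock);

    static long fn_taking_my_lock(void *arg)
    {
            mutex_lock(&my_lock);   /* blocks forever: the caller holds it */
            mutex_unlock(&my_lock);
            return 0;
    }

    static long broken_example(unsigned int cpu)
    {
            long ret;

            /* WRONG: @fn can never finish, so work_on_cpu() never returns. */
            mutex_lock(&my_lock);
            ret = work_on_cpu(cpu, fn_taking_my_lock, NULL);
            mutex_unlock(&my_lock);
            return ret;
    }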