Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched: Cure nr_iowait_cpu() users
init: Fix comment
init, sched: Fix race between init and kthreadd

+25 -13
+2 -2
drivers/cpuidle/governors/menu.c
··· 143 143 * This allows us to calculate 144 144 * E(duration)|iowait 145 145 */ 146 - if (nr_iowait_cpu()) 146 + if (nr_iowait_cpu(smp_processor_id())) 147 147 bucket = BUCKETS/2; 148 148 149 149 if (duration < 10) ··· 175 175 mult += 2 * get_loadavg(); 176 176 177 177 /* for IO wait tasks (per cpu!) we add 5x each */ 178 - mult += 10 * nr_iowait_cpu(); 178 + mult += 10 * nr_iowait_cpu(smp_processor_id()); 179 179 180 180 return mult; 181 181 }
+1 -1
include/linux/sched.h
··· 139 139 extern unsigned long nr_running(void); 140 140 extern unsigned long nr_uninterruptible(void); 141 141 extern unsigned long nr_iowait(void); 142 - extern unsigned long nr_iowait_cpu(void); 142 + extern unsigned long nr_iowait_cpu(int cpu); 143 143 extern unsigned long this_cpu_load(void); 144 144 145 145
+12
init/main.c
··· 424 424 * gcc-3.4 accidentally inlines this function, so use noinline. 425 425 */ 426 426 427 + static __initdata DECLARE_COMPLETION(kthreadd_done); 428 + 427 429 static noinline void __init_refok rest_init(void) 428 430 __releases(kernel_lock) 429 431 { 430 432 int pid; 431 433 432 434 rcu_scheduler_starting(); 435 + /* 436 + * We need to spawn init first so that it obtains pid 1, however 437 + * the init task will end up wanting to create kthreads, which, if 438 + * we schedule it before we create kthreadd, will OOPS. 439 + */ 433 440 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); 434 441 numa_default_policy(); 435 442 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); 436 443 rcu_read_lock(); 437 444 kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns); 438 445 rcu_read_unlock(); 446 + complete(&kthreadd_done); 439 447 unlock_kernel(); 440 448 441 449 /* ··· 865 857 866 858 static int __init kernel_init(void * unused) 867 859 { 860 + /* 861 + * Wait until kthreadd is all set-up. 862 + */ 863 + wait_for_completion(&kthreadd_done); 868 864 lock_kernel(); 869 865 870 866 /*
+2 -2
kernel/sched.c
··· 2873 2873 return sum; 2874 2874 } 2875 2875 2876 - unsigned long nr_iowait_cpu(void) 2876 + unsigned long nr_iowait_cpu(int cpu) 2877 2877 { 2878 - struct rq *this = this_rq(); 2878 + struct rq *this = cpu_rq(cpu); 2879 2879 return atomic_read(&this->nr_iowait); 2880 2880 } 2881 2881
+8 -8
kernel/time/tick-sched.c
··· 154 154 * Updates the per cpu time idle statistics counters 155 155 */ 156 156 static void 157 - update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time) 157 + update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) 158 158 { 159 159 ktime_t delta; 160 160 161 161 if (ts->idle_active) { 162 162 delta = ktime_sub(now, ts->idle_entrytime); 163 163 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); 164 - if (nr_iowait_cpu() > 0) 164 + if (nr_iowait_cpu(cpu) > 0) 165 165 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); 166 166 ts->idle_entrytime = now; 167 167 } ··· 175 175 { 176 176 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 177 177 178 - update_ts_time_stats(ts, now, NULL); 178 + update_ts_time_stats(cpu, ts, now, NULL); 179 179 ts->idle_active = 0; 180 180 181 181 sched_clock_idle_wakeup_event(0); 182 182 } 183 183 184 - static ktime_t tick_nohz_start_idle(struct tick_sched *ts) 184 + static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) 185 185 { 186 186 ktime_t now; 187 187 188 188 now = ktime_get(); 189 189 190 - update_ts_time_stats(ts, now, NULL); 190 + update_ts_time_stats(cpu, ts, now, NULL); 191 191 192 192 ts->idle_entrytime = now; 193 193 ts->idle_active = 1; ··· 216 216 if (!tick_nohz_enabled) 217 217 return -1; 218 218 219 - update_ts_time_stats(ts, ktime_get(), last_update_time); 219 + update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); 220 220 221 221 return ktime_to_us(ts->idle_sleeptime); 222 222 } ··· 242 242 if (!tick_nohz_enabled) 243 243 return -1; 244 244 245 - update_ts_time_stats(ts, ktime_get(), last_update_time); 245 + update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); 246 246 247 247 return ktime_to_us(ts->iowait_sleeptime); 248 248 } ··· 284 284 */ 285 285 ts->inidle = 1; 286 286 287 - now = tick_nohz_start_idle(ts); 287 + now = tick_nohz_start_idle(cpu, ts); 288 288 289 289 /* 290 290 * If this cpu is offline 
and it is the one which updates jiffies, then give up the assignment and let it be taken by the cpu which runs the tick timer next.