Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
sched: Fix broken assertion
sched: Assert task state bits at build time
sched: Update task_state_array with new states
sched: Add missing state chars to TASK_STATE_TO_CHAR_STR
sched: Move TASK_STATE_TO_CHAR_STR near the TASK_state bits
sched: Teach might_sleep() about preemptible RCU
sched: Make warning less noisy
sched: Simplify set_task_cpu()
sched: Remove the cfs_rq dependency from set_task_cpu()
sched: Add pre and post wakeup hooks
sched: Move kthread_bind() back to kthread.c
sched: Fix select_task_rq() vs hotplug issues
sched: Fix sched_exec() balancing
sched: Ensure set_task_cpu() is never called on blocked tasks
sched: Use TASK_WAKING for fork wakeups
sched: Select_task_rq_fair() must honour SD_LOAD_BALANCE
sched: Fix task_hot() test order
sched: Fix set_cpu_active() in cpu_down()
sched: Mark boot-cpu active before smp_init()
sched: Fix cpu_clock() in NMIs, on !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
...

+337 -250
+12 -7
fs/proc/array.c
··· 134 134 * simple bit tests. 135 135 */ 136 136 static const char *task_state_array[] = { 137 - "R (running)", /* 0 */ 138 - "S (sleeping)", /* 1 */ 139 - "D (disk sleep)", /* 2 */ 140 - "T (stopped)", /* 4 */ 141 - "T (tracing stop)", /* 8 */ 142 - "Z (zombie)", /* 16 */ 143 - "X (dead)" /* 32 */ 137 + "R (running)", /* 0 */ 138 + "S (sleeping)", /* 1 */ 139 + "D (disk sleep)", /* 2 */ 140 + "T (stopped)", /* 4 */ 141 + "t (tracing stop)", /* 8 */ 142 + "Z (zombie)", /* 16 */ 143 + "X (dead)", /* 32 */ 144 + "x (dead)", /* 64 */ 145 + "K (wakekill)", /* 128 */ 146 + "W (waking)", /* 256 */ 144 147 }; 145 148 146 149 static inline const char *get_task_state(struct task_struct *tsk) 147 150 { 148 151 unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; 149 152 const char **p = &task_state_array[0]; 153 + 154 + BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array)); 150 155 151 156 while (state) { 152 157 p++;
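
The lookup relies on every task state being a distinct bit: get_task_state() shifts the state word right until it is empty, advancing one array slot per bit, so position N in the table names bit 1 << (N-1) and position 0 covers the "no bits set" running case. A minimal userspace sketch of the same walk with the new entries in place (state_name(), the typedef and main() are illustrative, not kernel code):

    #include <stdio.h>

    static const char *state_array[] = {
            "R (running)",      /*   0 */
            "S (sleeping)",     /*   1 */
            "D (disk sleep)",   /*   2 */
            "T (stopped)",      /*   4 */
            "t (tracing stop)", /*   8 */
            "Z (zombie)",       /*  16 */
            "X (dead)",         /*  32 */
            "x (dead)",         /*  64 */
            "K (wakekill)",     /* 128 */
            "W (waking)",       /* 256 */
    };

    /* Stand-in for the BUILD_BUG_ON() above: 1 + ilog2(512) == 10 entries;
     * this typedef fails to compile if the table goes out of sync. */
    typedef char assert_table_size[
            sizeof(state_array) / sizeof(state_array[0]) == 10 ? 1 : -1];

    static const char *state_name(unsigned int state)
    {
            const char **p = &state_array[0];

            while (state) {         /* one slot per bit, highest bit wins */
                    p++;
                    state >>= 1;
            }
            return *p;
    }

    int main(void)
    {
            printf("%s\n", state_name(0));   /* R (running)      */
            printf("%s\n", state_name(8));   /* t (tracing stop) */
            printf("%s\n", state_name(256)); /* W (waking)       */
            return 0;
    }
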
+5
include/linux/rcutiny.h
··· 101 101 { 102 102 } 103 103 104 + static inline int rcu_preempt_depth(void) 105 + { 106 + return 0; 107 + } 108 + 104 109 #endif /* __LINUX_RCUTINY_H */
+11
include/linux/rcutree.h
··· 45 45 extern void synchronize_rcu(void); 46 46 extern void exit_rcu(void); 47 47 48 + /* 49 + * Defined as macro as it is a very low level header 50 + * included from areas that don't even know about current 51 + */ 52 + #define rcu_preempt_depth() (current->rcu_read_lock_nesting) 53 + 48 54 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 49 55 50 56 static inline void __rcu_read_lock(void) ··· 67 61 68 62 static inline void exit_rcu(void) 69 63 { 64 + } 65 + 66 + static inline int rcu_preempt_depth(void) 67 + { 68 + return 0; 70 69 } 71 70 72 71 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
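
The point of exporting rcu_preempt_depth() is the might_sleep() change in kernel/sched.c below: with CONFIG_TREE_PREEMPT_RCU, rcu_read_lock() bumps current->rcu_read_lock_nesting rather than the preempt count, so a sleep inside an RCU read-side critical section used to slip past the debug check. A hypothetical offender that CONFIG_DEBUG_SPINLOCK_SLEEP kernels can now flag (buggy_lookup() is made up for illustration):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Buggy pattern: blocking allocation inside an RCU read-side section.
     * On a preemptible-RCU kernel the preempt count stays at zero here,
     * so only the new rcu_preempt_depth() term makes might_sleep() fire. */
    static void *buggy_lookup(void)
    {
            void *obj;

            rcu_read_lock();
            obj = kmalloc(64, GFP_KERNEL);  /* may sleep */
            rcu_read_unlock();

            return obj;
    }
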
+9 -4
include/linux/sched.h
··· 192 192 #define TASK_DEAD 64 193 193 #define TASK_WAKEKILL 128 194 194 #define TASK_WAKING 256 195 + #define TASK_STATE_MAX 512 196 + 197 + #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" 198 + 199 + extern char ___assert_task_state[1 - 2*!!( 200 + sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; 195 201 196 202 /* Convenience macros for the sake of set_task_state */ 197 203 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) ··· 1097 1091 enum cpu_idle_type idle); 1098 1092 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1099 1093 void (*post_schedule) (struct rq *this_rq); 1100 - void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); 1094 + void (*task_waking) (struct rq *this_rq, struct task_struct *task); 1095 + void (*task_woken) (struct rq *this_rq, struct task_struct *task); 1101 1096 1102 1097 void (*set_cpus_allowed)(struct task_struct *p, 1103 1098 const struct cpumask *newmask); ··· 1122 1115 struct task_struct *task); 1123 1116 1124 1117 #ifdef CONFIG_FAIR_GROUP_SCHED 1125 - void (*moved_group) (struct task_struct *p); 1118 + void (*moved_group) (struct task_struct *p, int on_rq); 1126 1119 #endif 1127 1120 }; 1128 1121 ··· 2600 2593 { 2601 2594 } 2602 2595 #endif /* CONFIG_MM_OWNER */ 2603 - 2604 - #define TASK_STATE_TO_CHAR_STR "RSDTtZX" 2605 2596 2606 2597 #endif /* __KERNEL__ */ 2607 2598
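
The ___assert_task_state declaration is a build-time check with no runtime cost: the array size evaluates to 1 when TASK_STATE_TO_CHAR_STR has exactly one letter per state bit and to -1 on a mismatch, and no compiler accepts a negative array size. The same trick rewritten in plain userspace C, with illustrative names (the macro and identifiers below are not the kernel's):

    /* Fails to compile when expr is false: the array size becomes -1. */
    #define BUILD_ASSERT(expr, name)  extern char name[1 - 2 * !(expr)]

    #define STATE_CHARS  "RSDTtZXxKW"   /* one letter per state bit */

    /* sizeof() counts the trailing NUL, hence the -1; ilog2(512) + 1 == 10. */
    BUILD_ASSERT(sizeof(STATE_CHARS) - 1 == 10, state_chars_cover_all_bits);

    int main(void) { return 0; }
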
+1 -6
init/main.c
··· 369 369 { 370 370 unsigned int cpu; 371 371 372 - /* 373 - * Set up the current CPU as possible to migrate to. 374 - * The other ones will be done by cpu_up/cpu_down() 375 - */ 376 - set_cpu_active(smp_processor_id(), true); 377 - 378 372 /* FIXME: This should be done in userspace --RR */ 379 373 for_each_present_cpu(cpu) { 380 374 if (num_online_cpus() >= setup_max_cpus) ··· 480 486 int cpu = smp_processor_id(); 481 487 /* Mark the boot cpu "present", "online" etc for SMP and UP case */ 482 488 set_cpu_online(cpu, true); 489 + set_cpu_active(cpu, true); 483 490 set_cpu_present(cpu, true); 484 491 set_cpu_possible(cpu, true); 485 492 }
+3 -21
kernel/cpu.c
··· 209 209 return -ENOMEM; 210 210 211 211 cpu_hotplug_begin(); 212 + set_cpu_active(cpu, false); 212 213 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, 213 214 hcpu, -1, &nr_calls); 214 215 if (err == NOTIFY_BAD) { ··· 280 279 err = -EBUSY; 281 280 goto out; 282 281 } 283 - 284 - set_cpu_active(cpu, false); 285 - 286 - /* 287 - * Make sure the all cpus did the reschedule and are not 288 - * using stale version of the cpu_active_mask. 289 - * This is not strictly necessary becuase stop_machine() 290 - * that we run down the line already provides the required 291 - * synchronization. But it's really a side effect and we do not 292 - * want to depend on the innards of the stop_machine here. 293 - */ 294 - synchronize_sched(); 295 282 296 283 err = _cpu_down(cpu, 0); 297 284 ··· 371 382 return error; 372 383 cpu_maps_update_begin(); 373 384 first_cpu = cpumask_first(cpu_online_mask); 374 - /* We take down all of the non-boot CPUs in one shot to avoid races 385 + /* 386 + * We take down all of the non-boot CPUs in one shot to avoid races 375 387 * with the userspace trying to use the CPU hotplug at the same time 376 388 */ 377 389 cpumask_clear(frozen_cpus); 378 - 379 - for_each_online_cpu(cpu) { 380 - if (cpu == first_cpu) 381 - continue; 382 - set_cpu_active(cpu, false); 383 - } 384 - 385 - synchronize_sched(); 386 390 387 391 printk("Disabling non-boot CPUs ...\n"); 388 392 for_each_online_cpu(cpu) {
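
With set_cpu_active() now cleared at the top of _cpu_down(), under cpu_hotplug_begin() and before the CPU_DOWN_PREPARE notifiers run, the scheduler stops picking the dying CPU as a wake-up target before any teardown work starts, which is what lets the explicit synchronize_sched() calls go away. A hypothetical notifier illustrating the ordering guarantee (the callback and its WARN_ON are invented, not part of this series):

    #include <linux/cpu.h>
    #include <linux/cpumask.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>

    static int demo_cpu_callback(struct notifier_block *nb,
                                 unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            switch (action & ~CPU_TASKS_FROZEN) {
            case CPU_DOWN_PREPARE:
                    /* The cpu is already out of cpu_active_mask here, so
                     * select_task_rq() will not route new work to it while
                     * this notifier drains per-cpu state. */
                    WARN_ON(cpu_active(cpu));
                    break;
            }
            return NOTIFY_OK;
    }
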
+23
kernel/kthread.c
··· 150 150 EXPORT_SYMBOL(kthread_create); 151 151 152 152 /** 153 + * kthread_bind - bind a just-created kthread to a cpu. 154 + * @p: thread created by kthread_create(). 155 + * @cpu: cpu (might not be online, must be possible) for @k to run on. 156 + * 157 + * Description: This function is equivalent to set_cpus_allowed(), 158 + * except that @cpu doesn't need to be online, and the thread must be 159 + * stopped (i.e., just returned from kthread_create()). 160 + */ 161 + void kthread_bind(struct task_struct *p, unsigned int cpu) 162 + { 163 + /* Must have done schedule() in kthread() before we set_task_cpu */ 164 + if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) { 165 + WARN_ON(1); 166 + return; 167 + } 168 + 169 + p->cpus_allowed = cpumask_of_cpu(cpu); 170 + p->rt.nr_cpus_allowed = 1; 171 + p->flags |= PF_THREAD_BOUND; 172 + } 173 + EXPORT_SYMBOL(kthread_bind); 174 + 175 + /** 153 176 * kthread_stop - stop a thread created by kthread_create(). 154 177 * @k: thread created by kthread_create(). 155 178 *
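
Moving kthread_bind() back here works because it no longer needs the runqueue lock: the thread is still parked in kthread() after kthread_create(), wait_task_inactive() confirms that, and the wake-up that follows does the actual placement. A typical caller, sketched for illustration (worker_fn and start_pinned_worker are made-up names; the kthread_* calls are the real API):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int worker_fn(void *data)
    {
            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);
            return 0;
    }

    static struct task_struct *start_pinned_worker(unsigned int cpu)
    {
            struct task_struct *tsk;

            tsk = kthread_create(worker_fn, NULL, "pinned/%u", cpu);
            if (!IS_ERR(tsk)) {
                    kthread_bind(tsk, cpu);   /* thread has not run yet   */
                    wake_up_process(tsk);     /* first run lands on @cpu  */
            }
            return tsk;
    }
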
+207 -194
kernel/sched.c
··· 26 26 * Thomas Gleixner, Mike Kravetz 27 27 */ 28 28 29 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 + 29 31 #include <linux/mm.h> 30 32 #include <linux/module.h> 31 33 #include <linux/nmi.h> ··· 2004 2002 p->sched_class->prio_changed(rq, p, oldprio, running); 2005 2003 } 2006 2004 2007 - /** 2008 - * kthread_bind - bind a just-created kthread to a cpu. 2009 - * @p: thread created by kthread_create(). 2010 - * @cpu: cpu (might not be online, must be possible) for @k to run on. 2011 - * 2012 - * Description: This function is equivalent to set_cpus_allowed(), 2013 - * except that @cpu doesn't need to be online, and the thread must be 2014 - * stopped (i.e., just returned from kthread_create()). 2015 - * 2016 - * Function lives here instead of kthread.c because it messes with 2017 - * scheduler internals which require locking. 2018 - */ 2019 - void kthread_bind(struct task_struct *p, unsigned int cpu) 2020 - { 2021 - struct rq *rq = cpu_rq(cpu); 2022 - unsigned long flags; 2023 - 2024 - /* Must have done schedule() in kthread() before we set_task_cpu */ 2025 - if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) { 2026 - WARN_ON(1); 2027 - return; 2028 - } 2029 - 2030 - raw_spin_lock_irqsave(&rq->lock, flags); 2031 - update_rq_clock(rq); 2032 - set_task_cpu(p, cpu); 2033 - p->cpus_allowed = cpumask_of_cpu(cpu); 2034 - p->rt.nr_cpus_allowed = 1; 2035 - p->flags |= PF_THREAD_BOUND; 2036 - raw_spin_unlock_irqrestore(&rq->lock, flags); 2037 - } 2038 - EXPORT_SYMBOL(kthread_bind); 2039 - 2040 2005 #ifdef CONFIG_SMP 2041 2006 /* 2042 2007 * Is this task likely cache-hot: ··· 2013 2044 { 2014 2045 s64 delta; 2015 2046 2047 + if (p->sched_class != &fair_sched_class) 2048 + return 0; 2049 + 2016 2050 /* 2017 2051 * Buddy candidates are cache hot: 2018 2052 */ ··· 2023 2051 (&p->se == cfs_rq_of(&p->se)->next || 2024 2052 &p->se == cfs_rq_of(&p->se)->last)) 2025 2053 return 1; 2026 - 2027 - if (p->sched_class != &fair_sched_class) 2028 - return 0; 2029 2054 2030 2055 if (sysctl_sched_migration_cost == -1) 2031 2056 return 1; ··· 2034 2065 return delta < (s64)sysctl_sched_migration_cost; 2035 2066 } 2036 2067 2037 - 2038 2068 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 2039 2069 { 2040 - int old_cpu = task_cpu(p); 2041 - struct cfs_rq *old_cfsrq = task_cfs_rq(p), 2042 - *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); 2070 + #ifdef CONFIG_SCHED_DEBUG 2071 + /* 2072 + * We should never call set_task_cpu() on a blocked task, 2073 + * ttwu() will sort out the placement. 2074 + */ 2075 + WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 2076 + !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); 2077 + #endif 2043 2078 2044 2079 trace_sched_migrate_task(p, new_cpu); 2045 2080 2046 - if (old_cpu != new_cpu) { 2047 - p->se.nr_migrations++; 2048 - perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 2049 - 1, 1, NULL, 0); 2050 - } 2051 - p->se.vruntime -= old_cfsrq->min_vruntime - 2052 - new_cfsrq->min_vruntime; 2081 + if (task_cpu(p) == new_cpu) 2082 + return; 2083 + 2084 + p->se.nr_migrations++; 2085 + perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); 2053 2086 2054 2087 __set_task_cpu(p, new_cpu); 2055 2088 } ··· 2076 2105 2077 2106 /* 2078 2107 * If the task is not on a runqueue (and not running), then 2079 - * it is sufficient to simply update the task's cpu field. 2108 + * the next wake-up will properly place the task. 
2080 2109 */ 2081 - if (!p->se.on_rq && !task_running(rq, p)) { 2082 - update_rq_clock(rq); 2083 - set_task_cpu(p, dest_cpu); 2110 + if (!p->se.on_rq && !task_running(rq, p)) 2084 2111 return 0; 2085 - } 2086 2112 2087 2113 init_completion(&req->done); 2088 2114 req->task = p; ··· 2285 2317 } 2286 2318 2287 2319 #ifdef CONFIG_SMP 2320 + static int select_fallback_rq(int cpu, struct task_struct *p) 2321 + { 2322 + int dest_cpu; 2323 + const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu)); 2324 + 2325 + /* Look for allowed, online CPU in same node. */ 2326 + for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) 2327 + if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 2328 + return dest_cpu; 2329 + 2330 + /* Any allowed, online CPU? */ 2331 + dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); 2332 + if (dest_cpu < nr_cpu_ids) 2333 + return dest_cpu; 2334 + 2335 + /* No more Mr. Nice Guy. */ 2336 + if (dest_cpu >= nr_cpu_ids) { 2337 + rcu_read_lock(); 2338 + cpuset_cpus_allowed_locked(p, &p->cpus_allowed); 2339 + rcu_read_unlock(); 2340 + dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); 2341 + 2342 + /* 2343 + * Don't tell them about moving exiting tasks or 2344 + * kernel threads (both mm NULL), since they never 2345 + * leave kernel. 2346 + */ 2347 + if (p->mm && printk_ratelimit()) { 2348 + printk(KERN_INFO "process %d (%s) no " 2349 + "longer affine to cpu%d\n", 2350 + task_pid_nr(p), p->comm, cpu); 2351 + } 2352 + } 2353 + 2354 + return dest_cpu; 2355 + } 2356 + 2357 + /* 2358 + * Called from: 2359 + * 2360 + * - fork, @p is stable because it isn't on the tasklist yet 2361 + * 2362 + * - exec, @p is unstable, retry loop 2363 + * 2364 + * - wake-up, we serialize ->cpus_allowed against TASK_WAKING so 2365 + * we should be good. 2366 + */ 2288 2367 static inline 2289 2368 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) 2290 2369 { 2291 - return p->sched_class->select_task_rq(p, sd_flags, wake_flags); 2370 + int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); 2371 + 2372 + /* 2373 + * In order not to call set_task_cpu() on a blocking task we need 2374 + * to rely on ttwu() to place the task on a valid ->cpus_allowed 2375 + * cpu. 2376 + * 2377 + * Since this is common to all placement strategies, this lives here. 
2378 + * 2379 + * [ this allows ->select_task() to simply return task_cpu(p) and 2380 + * not worry about this generic constraint ] 2381 + */ 2382 + if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || 2383 + !cpu_active(cpu))) 2384 + cpu = select_fallback_rq(task_cpu(p), p); 2385 + 2386 + return cpu; 2292 2387 } 2293 2388 #endif 2294 2389 ··· 2406 2375 if (task_contributes_to_load(p)) 2407 2376 rq->nr_uninterruptible--; 2408 2377 p->state = TASK_WAKING; 2378 + 2379 + if (p->sched_class->task_waking) 2380 + p->sched_class->task_waking(rq, p); 2381 + 2409 2382 __task_rq_unlock(rq); 2410 2383 2411 2384 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); ··· 2473 2438 2474 2439 p->state = TASK_RUNNING; 2475 2440 #ifdef CONFIG_SMP 2476 - if (p->sched_class->task_wake_up) 2477 - p->sched_class->task_wake_up(rq, p); 2441 + if (p->sched_class->task_woken) 2442 + p->sched_class->task_woken(rq, p); 2478 2443 2479 2444 if (unlikely(rq->idle_stamp)) { 2480 2445 u64 delta = rq->clock - rq->idle_stamp; ··· 2573 2538 #ifdef CONFIG_PREEMPT_NOTIFIERS 2574 2539 INIT_HLIST_HEAD(&p->preempt_notifiers); 2575 2540 #endif 2576 - 2577 - /* 2578 - * We mark the process as running here, but have not actually 2579 - * inserted it onto the runqueue yet. This guarantees that 2580 - * nobody will actually run it, and a signal or other external 2581 - * event cannot wake it up and insert it on the runqueue either. 2582 - */ 2583 - p->state = TASK_RUNNING; 2584 2541 } 2585 2542 2586 2543 /* ··· 2583 2556 int cpu = get_cpu(); 2584 2557 2585 2558 __sched_fork(p); 2559 + /* 2560 + * We mark the process as waking here. This guarantees that 2561 + * nobody will actually run it, and a signal or other external 2562 + * event cannot wake it up and insert it on the runqueue either. 2563 + */ 2564 + p->state = TASK_WAKING; 2586 2565 2587 2566 /* 2588 2567 * Revert to default priority/policy on fork if requested. ··· 2657 2624 struct rq *rq; 2658 2625 2659 2626 rq = task_rq_lock(p, &flags); 2660 - BUG_ON(p->state != TASK_RUNNING); 2627 + BUG_ON(p->state != TASK_WAKING); 2628 + p->state = TASK_RUNNING; 2661 2629 update_rq_clock(rq); 2662 2630 activate_task(rq, p, 0); 2663 2631 trace_sched_wakeup_new(rq, p, 1); 2664 2632 check_preempt_curr(rq, p, WF_FORK); 2665 2633 #ifdef CONFIG_SMP 2666 - if (p->sched_class->task_wake_up) 2667 - p->sched_class->task_wake_up(rq, p); 2634 + if (p->sched_class->task_woken) 2635 + p->sched_class->task_woken(rq, p); 2668 2636 #endif 2669 2637 task_rq_unlock(rq, &flags); 2670 2638 } ··· 3135 3101 } 3136 3102 3137 3103 /* 3138 - * If dest_cpu is allowed for this process, migrate the task to it. 3139 - * This is accomplished by forcing the cpu_allowed mask to only 3140 - * allow dest_cpu, which will force the cpu onto dest_cpu. Then 3141 - * the cpu_allowed mask is restored. 3104 + * sched_exec - execve() is a valuable balancing opportunity, because at 3105 + * this point the task has the smallest effective memory and cache footprint. 
3142 3106 */ 3143 - static void sched_migrate_task(struct task_struct *p, int dest_cpu) 3107 + void sched_exec(void) 3144 3108 { 3109 + struct task_struct *p = current; 3145 3110 struct migration_req req; 3111 + int dest_cpu, this_cpu; 3146 3112 unsigned long flags; 3147 3113 struct rq *rq; 3148 3114 3115 + again: 3116 + this_cpu = get_cpu(); 3117 + dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0); 3118 + if (dest_cpu == this_cpu) { 3119 + put_cpu(); 3120 + return; 3121 + } 3122 + 3149 3123 rq = task_rq_lock(p, &flags); 3124 + put_cpu(); 3125 + 3126 + /* 3127 + * select_task_rq() can race against ->cpus_allowed 3128 + */ 3150 3129 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) 3151 - || unlikely(!cpu_active(dest_cpu))) 3152 - goto out; 3130 + || unlikely(!cpu_active(dest_cpu))) { 3131 + task_rq_unlock(rq, &flags); 3132 + goto again; 3133 + } 3153 3134 3154 3135 /* force the process onto the specified CPU */ 3155 3136 if (migrate_task(p, dest_cpu, &req)) { ··· 3179 3130 3180 3131 return; 3181 3132 } 3182 - out: 3183 3133 task_rq_unlock(rq, &flags); 3184 - } 3185 - 3186 - /* 3187 - * sched_exec - execve() is a valuable balancing opportunity, because at 3188 - * this point the task has the smallest effective memory and cache footprint. 3189 - */ 3190 - void sched_exec(void) 3191 - { 3192 - int new_cpu, this_cpu = get_cpu(); 3193 - new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0); 3194 - put_cpu(); 3195 - if (new_cpu != this_cpu) 3196 - sched_migrate_task(current, new_cpu); 3197 3134 } 3198 3135 3199 3136 /* ··· 5375 5340 { 5376 5341 struct pt_regs *regs = get_irq_regs(); 5377 5342 5378 - printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 5379 - prev->comm, prev->pid, preempt_count()); 5343 + pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n", 5344 + prev->comm, prev->pid, preempt_count()); 5380 5345 5381 5346 debug_show_held_locks(prev); 5382 5347 print_modules(); ··· 5946 5911 */ 5947 5912 bool try_wait_for_completion(struct completion *x) 5948 5913 { 5914 + unsigned long flags; 5949 5915 int ret = 1; 5950 5916 5951 - spin_lock_irq(&x->wait.lock); 5917 + spin_lock_irqsave(&x->wait.lock, flags); 5952 5918 if (!x->done) 5953 5919 ret = 0; 5954 5920 else 5955 5921 x->done--; 5956 - spin_unlock_irq(&x->wait.lock); 5922 + spin_unlock_irqrestore(&x->wait.lock, flags); 5957 5923 return ret; 5958 5924 } 5959 5925 EXPORT_SYMBOL(try_wait_for_completion); ··· 5969 5933 */ 5970 5934 bool completion_done(struct completion *x) 5971 5935 { 5936 + unsigned long flags; 5972 5937 int ret = 1; 5973 5938 5974 - spin_lock_irq(&x->wait.lock); 5939 + spin_lock_irqsave(&x->wait.lock, flags); 5975 5940 if (!x->done) 5976 5941 ret = 0; 5977 - spin_unlock_irq(&x->wait.lock); 5942 + spin_unlock_irqrestore(&x->wait.lock, flags); 5978 5943 return ret; 5979 5944 } 5980 5945 EXPORT_SYMBOL(completion_done); ··· 6494 6457 return -EINVAL; 6495 6458 6496 6459 retval = -ESRCH; 6497 - read_lock(&tasklist_lock); 6460 + rcu_read_lock(); 6498 6461 p = find_process_by_pid(pid); 6499 6462 if (p) { 6500 6463 retval = security_task_getscheduler(p); ··· 6502 6465 retval = p->policy 6503 6466 | (p->sched_reset_on_fork ? 
SCHED_RESET_ON_FORK : 0); 6504 6467 } 6505 - read_unlock(&tasklist_lock); 6468 + rcu_read_unlock(); 6506 6469 return retval; 6507 6470 } 6508 6471 ··· 6520 6483 if (!param || pid < 0) 6521 6484 return -EINVAL; 6522 6485 6523 - read_lock(&tasklist_lock); 6486 + rcu_read_lock(); 6524 6487 p = find_process_by_pid(pid); 6525 6488 retval = -ESRCH; 6526 6489 if (!p) ··· 6531 6494 goto out_unlock; 6532 6495 6533 6496 lp.sched_priority = p->rt_priority; 6534 - read_unlock(&tasklist_lock); 6497 + rcu_read_unlock(); 6535 6498 6536 6499 /* 6537 6500 * This one might sleep, we cannot do it with a spinlock held ... ··· 6541 6504 return retval; 6542 6505 6543 6506 out_unlock: 6544 - read_unlock(&tasklist_lock); 6507 + rcu_read_unlock(); 6545 6508 return retval; 6546 6509 } 6547 6510 ··· 6552 6515 int retval; 6553 6516 6554 6517 get_online_cpus(); 6555 - read_lock(&tasklist_lock); 6518 + rcu_read_lock(); 6556 6519 6557 6520 p = find_process_by_pid(pid); 6558 6521 if (!p) { 6559 - read_unlock(&tasklist_lock); 6522 + rcu_read_unlock(); 6560 6523 put_online_cpus(); 6561 6524 return -ESRCH; 6562 6525 } 6563 6526 6564 - /* 6565 - * It is not safe to call set_cpus_allowed with the 6566 - * tasklist_lock held. We will bump the task_struct's 6567 - * usage count and then drop tasklist_lock. 6568 - */ 6527 + /* Prevent p going away */ 6569 6528 get_task_struct(p); 6570 - read_unlock(&tasklist_lock); 6529 + rcu_read_unlock(); 6571 6530 6572 6531 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 6573 6532 retval = -ENOMEM; ··· 6649 6616 int retval; 6650 6617 6651 6618 get_online_cpus(); 6652 - read_lock(&tasklist_lock); 6619 + rcu_read_lock(); 6653 6620 6654 6621 retval = -ESRCH; 6655 6622 p = find_process_by_pid(pid); ··· 6665 6632 task_rq_unlock(rq, &flags); 6666 6633 6667 6634 out_unlock: 6668 - read_unlock(&tasklist_lock); 6635 + rcu_read_unlock(); 6669 6636 put_online_cpus(); 6670 6637 6671 6638 return retval; ··· 6909 6876 return -EINVAL; 6910 6877 6911 6878 retval = -ESRCH; 6912 - read_lock(&tasklist_lock); 6879 + rcu_read_lock(); 6913 6880 p = find_process_by_pid(pid); 6914 6881 if (!p) 6915 6882 goto out_unlock; ··· 6922 6889 time_slice = p->sched_class->get_rr_interval(rq, p); 6923 6890 task_rq_unlock(rq, &flags); 6924 6891 6925 - read_unlock(&tasklist_lock); 6892 + rcu_read_unlock(); 6926 6893 jiffies_to_timespec(time_slice, &t); 6927 6894 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 6928 6895 return retval; 6929 6896 6930 6897 out_unlock: 6931 - read_unlock(&tasklist_lock); 6898 + rcu_read_unlock(); 6932 6899 return retval; 6933 6900 } 6934 6901 ··· 6940 6907 unsigned state; 6941 6908 6942 6909 state = p->state ? __ffs(p->state) + 1 : 0; 6943 - printk(KERN_INFO "%-13.13s %c", p->comm, 6910 + pr_info("%-13.13s %c", p->comm, 6944 6911 state < sizeof(stat_nam) - 1 ? 
stat_nam[state] : '?'); 6945 6912 #if BITS_PER_LONG == 32 6946 6913 if (state == TASK_RUNNING) 6947 - printk(KERN_CONT " running "); 6914 + pr_cont(" running "); 6948 6915 else 6949 - printk(KERN_CONT " %08lx ", thread_saved_pc(p)); 6916 + pr_cont(" %08lx ", thread_saved_pc(p)); 6950 6917 #else 6951 6918 if (state == TASK_RUNNING) 6952 - printk(KERN_CONT " running task "); 6919 + pr_cont(" running task "); 6953 6920 else 6954 - printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 6921 + pr_cont(" %016lx ", thread_saved_pc(p)); 6955 6922 #endif 6956 6923 #ifdef CONFIG_DEBUG_STACK_USAGE 6957 6924 free = stack_not_used(p); 6958 6925 #endif 6959 - printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 6926 + pr_cont("%5lu %5d %6d 0x%08lx\n", free, 6960 6927 task_pid_nr(p), task_pid_nr(p->real_parent), 6961 6928 (unsigned long)task_thread_info(p)->flags); 6962 6929 ··· 6968 6935 struct task_struct *g, *p; 6969 6936 6970 6937 #if BITS_PER_LONG == 32 6971 - printk(KERN_INFO 6972 - " task PC stack pid father\n"); 6938 + pr_info(" task PC stack pid father\n"); 6973 6939 #else 6974 - printk(KERN_INFO 6975 - " task PC stack pid father\n"); 6940 + pr_info(" task PC stack pid father\n"); 6976 6941 #endif 6977 6942 read_lock(&tasklist_lock); 6978 6943 do_each_thread(g, p) { ··· 7017 6986 raw_spin_lock_irqsave(&rq->lock, flags); 7018 6987 7019 6988 __sched_fork(idle); 6989 + idle->state = TASK_RUNNING; 7020 6990 idle->se.exec_start = sched_clock(); 7021 6991 7022 6992 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); ··· 7132 7100 struct rq *rq; 7133 7101 int ret = 0; 7134 7102 7103 + /* 7104 + * Since we rely on wake-ups to migrate sleeping tasks, don't change 7105 + * the ->cpus_allowed mask from under waking tasks, which would be 7106 + * possible when we change rq->lock in ttwu(), so synchronize against 7107 + * TASK_WAKING to avoid that. 7108 + */ 7109 + again: 7110 + while (p->state == TASK_WAKING) 7111 + cpu_relax(); 7112 + 7135 7113 rq = task_rq_lock(p, &flags); 7114 + 7115 + if (p->state == TASK_WAKING) { 7116 + task_rq_unlock(rq, &flags); 7117 + goto again; 7118 + } 7119 + 7136 7120 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 7137 7121 ret = -EINVAL; 7138 7122 goto out; ··· 7204 7156 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 7205 7157 { 7206 7158 struct rq *rq_dest, *rq_src; 7207 - int ret = 0, on_rq; 7159 + int ret = 0; 7208 7160 7209 7161 if (unlikely(!cpu_active(dest_cpu))) 7210 7162 return ret; ··· 7220 7172 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 7221 7173 goto fail; 7222 7174 7223 - on_rq = p->se.on_rq; 7224 - if (on_rq) 7175 + /* 7176 + * If we're not on a rq, the next wake-up will ensure we're 7177 + * placed properly. 7178 + */ 7179 + if (p->se.on_rq) { 7225 7180 deactivate_task(rq_src, p, 0); 7226 - 7227 - set_task_cpu(p, dest_cpu); 7228 - if (on_rq) { 7181 + set_task_cpu(p, dest_cpu); 7229 7182 activate_task(rq_dest, p, 0); 7230 7183 check_preempt_curr(rq_dest, p, 0); 7231 7184 } ··· 7322 7273 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 7323 7274 { 7324 7275 int dest_cpu; 7325 - const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); 7326 7276 7327 7277 again: 7328 - /* Look for allowed, online CPU in same node. */ 7329 - for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) 7330 - if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 7331 - goto move; 7278 + dest_cpu = select_fallback_rq(dead_cpu, p); 7332 7279 7333 - /* Any allowed, online CPU? 
*/ 7334 - dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); 7335 - if (dest_cpu < nr_cpu_ids) 7336 - goto move; 7337 - 7338 - /* No more Mr. Nice Guy. */ 7339 - if (dest_cpu >= nr_cpu_ids) { 7340 - cpuset_cpus_allowed_locked(p, &p->cpus_allowed); 7341 - dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); 7342 - 7343 - /* 7344 - * Don't tell them about moving exiting tasks or 7345 - * kernel threads (both mm NULL), since they never 7346 - * leave kernel. 7347 - */ 7348 - if (p->mm && printk_ratelimit()) { 7349 - printk(KERN_INFO "process %d (%s) no " 7350 - "longer affine to cpu%d\n", 7351 - task_pid_nr(p), p->comm, dead_cpu); 7352 - } 7353 - } 7354 - 7355 - move: 7356 7280 /* It can have affinity changed while we were choosing. */ 7357 7281 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) 7358 7282 goto again; ··· 7828 7806 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 7829 7807 7830 7808 if (!(sd->flags & SD_LOAD_BALANCE)) { 7831 - printk("does not load-balance\n"); 7809 + pr_cont("does not load-balance\n"); 7832 7810 if (sd->parent) 7833 - printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 7834 - " has parent"); 7811 + pr_err("ERROR: !SD_LOAD_BALANCE domain has parent\n"); 7835 7812 return -1; 7836 7813 } 7837 7814 7838 - printk(KERN_CONT "span %s level %s\n", str, sd->name); 7815 + pr_cont("span %s level %s\n", str, sd->name); 7839 7816 7840 7817 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 7841 - printk(KERN_ERR "ERROR: domain->span does not contain " 7842 - "CPU%d\n", cpu); 7818 + pr_err("ERROR: domain->span does not contain CPU%d\n", cpu); 7843 7819 } 7844 7820 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 7845 - printk(KERN_ERR "ERROR: domain->groups does not contain" 7846 - " CPU%d\n", cpu); 7821 + pr_err("ERROR: domain->groups does not contain CPU%d\n", cpu); 7847 7822 } 7848 7823 7849 7824 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 7850 7825 do { 7851 7826 if (!group) { 7852 - printk("\n"); 7853 - printk(KERN_ERR "ERROR: group is NULL\n"); 7827 + pr_cont("\n"); 7828 + pr_err("ERROR: group is NULL\n"); 7854 7829 break; 7855 7830 } 7856 7831 7857 7832 if (!group->cpu_power) { 7858 - printk(KERN_CONT "\n"); 7859 - printk(KERN_ERR "ERROR: domain->cpu_power not " 7860 - "set\n"); 7833 + pr_cont("\n"); 7834 + pr_err("ERROR: domain->cpu_power not set\n"); 7861 7835 break; 7862 7836 } 7863 7837 7864 7838 if (!cpumask_weight(sched_group_cpus(group))) { 7865 - printk(KERN_CONT "\n"); 7866 - printk(KERN_ERR "ERROR: empty group\n"); 7839 + pr_cont("\n"); 7840 + pr_err("ERROR: empty group\n"); 7867 7841 break; 7868 7842 } 7869 7843 7870 7844 if (cpumask_intersects(groupmask, sched_group_cpus(group))) { 7871 - printk(KERN_CONT "\n"); 7872 - printk(KERN_ERR "ERROR: repeated CPUs\n"); 7845 + pr_cont("\n"); 7846 + pr_err("ERROR: repeated CPUs\n"); 7873 7847 break; 7874 7848 } 7875 7849 ··· 7873 7855 7874 7856 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); 7875 7857 7876 - printk(KERN_CONT " %s", str); 7858 + pr_cont(" %s", str); 7877 7859 if (group->cpu_power != SCHED_LOAD_SCALE) { 7878 - printk(KERN_CONT " (cpu_power = %d)", 7879 - group->cpu_power); 7860 + pr_cont(" (cpu_power = %d)", group->cpu_power); 7880 7861 } 7881 7862 7882 7863 group = group->next; 7883 7864 } while (group != sd->groups); 7884 - printk(KERN_CONT "\n"); 7865 + pr_cont("\n"); 7885 7866 7886 7867 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 7887 - printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 7868 + pr_err("ERROR: groups 
don't span domain->span\n"); 7888 7869 7889 7870 if (sd->parent && 7890 7871 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 7891 - printk(KERN_ERR "ERROR: parent span is not a superset " 7892 - "of domain->span\n"); 7872 + pr_err("ERROR: parent span is not a superset of domain->span\n"); 7893 7873 return 0; 7894 7874 } 7895 7875 ··· 8443 8427 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), 8444 8428 GFP_KERNEL, num); 8445 8429 if (!sg) { 8446 - printk(KERN_WARNING "Can not alloc domain group for node %d\n", 8447 - num); 8430 + pr_warning("Can not alloc domain group for node %d\n", num); 8448 8431 return -ENOMEM; 8449 8432 } 8450 8433 d->sched_group_nodes[num] = sg; ··· 8472 8457 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), 8473 8458 GFP_KERNEL, num); 8474 8459 if (!sg) { 8475 - printk(KERN_WARNING 8476 - "Can not alloc domain group for node %d\n", j); 8460 + pr_warning("Can not alloc domain group for node %d\n", 8461 + j); 8477 8462 return -ENOMEM; 8478 8463 } 8479 8464 sg->cpu_power = 0; ··· 8701 8686 d->sched_group_nodes = kcalloc(nr_node_ids, 8702 8687 sizeof(struct sched_group *), GFP_KERNEL); 8703 8688 if (!d->sched_group_nodes) { 8704 - printk(KERN_WARNING "Can not alloc sched group node list\n"); 8689 + pr_warning("Can not alloc sched group node list\n"); 8705 8690 return sa_notcovered; 8706 8691 } 8707 8692 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes; ··· 8718 8703 return sa_send_covered; 8719 8704 d->rd = alloc_rootdomain(); 8720 8705 if (!d->rd) { 8721 - printk(KERN_WARNING "Cannot alloc root domain\n"); 8706 + pr_warning("Cannot alloc root domain\n"); 8722 8707 return sa_tmpmask; 8723 8708 } 8724 8709 return sa_rootdomain; ··· 9683 9668 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 9684 9669 static inline int preempt_count_equals(int preempt_offset) 9685 9670 { 9686 - int nested = preempt_count() & ~PREEMPT_ACTIVE; 9671 + int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); 9687 9672 9688 9673 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); 9689 9674 } ··· 9700 9685 return; 9701 9686 prev_jiffy = jiffies; 9702 9687 9703 - printk(KERN_ERR 9704 - "BUG: sleeping function called from invalid context at %s:%d\n", 9705 - file, line); 9706 - printk(KERN_ERR 9707 - "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 9708 - in_atomic(), irqs_disabled(), 9709 - current->pid, current->comm); 9688 + pr_err("BUG: sleeping function called from invalid context at %s:%d\n", 9689 + file, line); 9690 + pr_err("in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 9691 + in_atomic(), irqs_disabled(), 9692 + current->pid, current->comm); 9710 9693 9711 9694 debug_show_held_locks(current); 9712 9695 if (irqs_disabled()) ··· 10096 10083 10097 10084 #ifdef CONFIG_FAIR_GROUP_SCHED 10098 10085 if (tsk->sched_class->moved_group) 10099 - tsk->sched_class->moved_group(tsk); 10086 + tsk->sched_class->moved_group(tsk, on_rq); 10100 10087 #endif 10101 10088 10102 10089 if (unlikely(running))
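
Beyond the migration rework, note the completion helpers above: try_wait_for_completion() and completion_done() switch from spin_lock_irq() to spin_lock_irqsave(), so they no longer unconditionally re-enable interrupts and can be called from contexts that already run with IRQs off. A hedged sketch of the kind of caller this permits (the handler and completion names are invented for illustration):

    #include <linux/completion.h>
    #include <linux/interrupt.h>

    static DECLARE_COMPLETION(work_done);

    /* Hardirq context: interrupts are disabled on entry.  With the old
     * spin_unlock_irq() inside the helper, this call would have turned
     * them back on behind the handler's back. */
    static irqreturn_t demo_irq_handler(int irq, void *dev_id)
    {
            if (try_wait_for_completion(&work_done))
                    return IRQ_HANDLED;     /* consumed one completion */

            return IRQ_NONE;
    }
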
+16 -9
kernel/sched_clock.c
··· 236 236 } 237 237 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); 238 238 239 + unsigned long long cpu_clock(int cpu) 240 + { 241 + unsigned long long clock; 242 + unsigned long flags; 243 + 244 + local_irq_save(flags); 245 + clock = sched_clock_cpu(cpu); 246 + local_irq_restore(flags); 247 + 248 + return clock; 249 + } 250 + 239 251 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 240 252 241 253 void sched_clock_init(void) ··· 263 251 return sched_clock(); 264 252 } 265 253 266 - #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 267 254 268 255 unsigned long long cpu_clock(int cpu) 269 256 { 270 - unsigned long long clock; 271 - unsigned long flags; 272 - 273 - local_irq_save(flags); 274 - clock = sched_clock_cpu(cpu); 275 - local_irq_restore(flags); 276 - 277 - return clock; 257 + return sched_clock_cpu(cpu); 278 258 } 259 + 260 + #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 261 + 279 262 EXPORT_SYMBOL_GPL(cpu_clock);
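
On !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK kernels cpu_clock() now maps straight to sched_clock_cpu() with no local_irq_save()/restore() pair; flipping the interrupt state from NMI context is not safe on every architecture, which is what broke cpu_clock() in NMIs there. A hypothetical NMI-side user (the per-cpu variable and helper are made up):

    #include <linux/percpu.h>
    #include <linux/sched.h>
    #include <linux/smp.h>
    #include <linux/types.h>

    static DEFINE_PER_CPU(u64, last_nmi_ns);

    /* Called from an NMI handler: fine now that cpu_clock() no longer
     * touches the IRQ flags on this configuration. */
    static void note_nmi_timestamp(void)
    {
            int cpu = smp_processor_id();

            per_cpu(last_nmi_ns, cpu) = cpu_clock(cpu);
    }
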
+47 -6
kernel/sched_fair.c
··· 510 510 curr->sum_exec_runtime += delta_exec; 511 511 schedstat_add(cfs_rq, exec_clock, delta_exec); 512 512 delta_exec_weighted = calc_delta_fair(delta_exec, curr); 513 + 513 514 curr->vruntime += delta_exec_weighted; 514 515 update_min_vruntime(cfs_rq); 515 516 } ··· 766 765 se->vruntime = vruntime; 767 766 } 768 767 768 + #define ENQUEUE_WAKEUP 1 769 + #define ENQUEUE_MIGRATE 2 770 + 769 771 static void 770 - enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup) 772 + enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 771 773 { 774 + /* 775 + * Update the normalized vruntime before updating min_vruntime 776 + * through callig update_curr(). 777 + */ 778 + if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE)) 779 + se->vruntime += cfs_rq->min_vruntime; 780 + 772 781 /* 773 782 * Update run-time statistics of the 'current'. 774 783 */ 775 784 update_curr(cfs_rq); 776 785 account_entity_enqueue(cfs_rq, se); 777 786 778 - if (wakeup) { 787 + if (flags & ENQUEUE_WAKEUP) { 779 788 place_entity(cfs_rq, se, 0); 780 789 enqueue_sleeper(cfs_rq, se); 781 790 } ··· 839 828 __dequeue_entity(cfs_rq, se); 840 829 account_entity_dequeue(cfs_rq, se); 841 830 update_min_vruntime(cfs_rq); 831 + 832 + /* 833 + * Normalize the entity after updating the min_vruntime because the 834 + * update can refer to the ->curr item and we need to reflect this 835 + * movement in our normalized position. 836 + */ 837 + if (!sleep) 838 + se->vruntime -= cfs_rq->min_vruntime; 842 839 } 843 840 844 841 /* ··· 1057 1038 { 1058 1039 struct cfs_rq *cfs_rq; 1059 1040 struct sched_entity *se = &p->se; 1041 + int flags = 0; 1042 + 1043 + if (wakeup) 1044 + flags |= ENQUEUE_WAKEUP; 1045 + if (p->state == TASK_WAKING) 1046 + flags |= ENQUEUE_MIGRATE; 1060 1047 1061 1048 for_each_sched_entity(se) { 1062 1049 if (se->on_rq) 1063 1050 break; 1064 1051 cfs_rq = cfs_rq_of(se); 1065 - enqueue_entity(cfs_rq, se, wakeup); 1066 - wakeup = 1; 1052 + enqueue_entity(cfs_rq, se, flags); 1053 + flags = ENQUEUE_WAKEUP; 1067 1054 } 1068 1055 1069 1056 hrtick_update(rq); ··· 1144 1119 } 1145 1120 1146 1121 #ifdef CONFIG_SMP 1122 + 1123 + static void task_waking_fair(struct rq *rq, struct task_struct *p) 1124 + { 1125 + struct sched_entity *se = &p->se; 1126 + struct cfs_rq *cfs_rq = cfs_rq_of(se); 1127 + 1128 + se->vruntime -= cfs_rq->min_vruntime; 1129 + } 1147 1130 1148 1131 #ifdef CONFIG_FAIR_GROUP_SCHED 1149 1132 /* ··· 1462 1429 } 1463 1430 1464 1431 for_each_domain(cpu, tmp) { 1432 + if (!(tmp->flags & SD_LOAD_BALANCE)) 1433 + continue; 1434 + 1465 1435 /* 1466 1436 * If power savings logic is enabled for a domain, see if we 1467 1437 * are not overloaded, if so, don't balance wider. 
··· 2011 1975 resched_task(rq->curr); 2012 1976 } 2013 1977 1978 + se->vruntime -= cfs_rq->min_vruntime; 1979 + 2014 1980 raw_spin_unlock_irqrestore(&rq->lock, flags); 2015 1981 } 2016 1982 ··· 2066 2028 } 2067 2029 2068 2030 #ifdef CONFIG_FAIR_GROUP_SCHED 2069 - static void moved_group_fair(struct task_struct *p) 2031 + static void moved_group_fair(struct task_struct *p, int on_rq) 2070 2032 { 2071 2033 struct cfs_rq *cfs_rq = task_cfs_rq(p); 2072 2034 2073 2035 update_curr(cfs_rq); 2074 - place_entity(cfs_rq, &p->se, 1); 2036 + if (!on_rq) 2037 + place_entity(cfs_rq, &p->se, 1); 2075 2038 } 2076 2039 #endif 2077 2040 ··· 2112 2073 .move_one_task = move_one_task_fair, 2113 2074 .rq_online = rq_online_fair, 2114 2075 .rq_offline = rq_offline_fair, 2076 + 2077 + .task_waking = task_waking_fair, 2115 2078 #endif 2116 2079 2117 2080 .set_curr_task = set_curr_task_fair,
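
The recurring se->vruntime +=/-= cfs_rq->min_vruntime lines are the heart of the set_task_cpu() simplification: a task's vruntime is made relative to its old queue when it is dequeued (or marked waking) and rebased onto the destination queue's min_vruntime at enqueue time, so cross-CPU migration no longer needs both runqueues' state at once. A toy calculation with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long vruntime = 1000500;  /* task on the source cfs_rq */
            unsigned long long src_min  = 1000000;  /* source min_vruntime       */
            unsigned long long dst_min  = 42000000; /* destination min_vruntime  */

            vruntime -= src_min;  /* dequeue_entity() / task_waking_fair(): +500, relative */
            vruntime += dst_min;  /* enqueue_entity() with ENQUEUE_MIGRATE: rebased        */

            printf("%llu\n", vruntime); /* 42000500: keeps its 500ns lag on the new queue */
            return 0;
    }
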
+1 -1
kernel/sched_idletask.c
··· 35 35 dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep) 36 36 { 37 37 raw_spin_unlock_irq(&rq->lock); 38 - printk(KERN_ERR "bad: scheduling from the idle thread!\n"); 38 + pr_err("bad: scheduling from the idle thread!\n"); 39 39 dump_stack(); 40 40 raw_spin_lock_irq(&rq->lock); 41 41 }
+2 -2
kernel/sched_rt.c
··· 1472 1472 * If we are not running and we are not going to reschedule soon, we should 1473 1473 * try to push tasks away now 1474 1474 */ 1475 - static void task_wake_up_rt(struct rq *rq, struct task_struct *p) 1475 + static void task_woken_rt(struct rq *rq, struct task_struct *p) 1476 1476 { 1477 1477 if (!task_running(rq, p) && 1478 1478 !test_tsk_need_resched(rq->curr) && ··· 1753 1753 .rq_offline = rq_offline_rt, 1754 1754 .pre_schedule = pre_schedule_rt, 1755 1755 .post_schedule = post_schedule_rt, 1756 - .task_wake_up = task_wake_up_rt, 1756 + .task_woken = task_woken_rt, 1757 1757 .switched_from = switched_from_rt, 1758 1758 #endif 1759 1759