Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
"Three CPU hotplug related fixes and a debugging improvement"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/debug: Add debugfs knob for "sched_debug"
sched/core: WARN() when migrating to an offline CPU
sched/fair: Plug hole between hotplug and active_load_balance()
sched/fair: Avoid newidle balance for !active CPUs

5 files changed, 25 insertions(+), 3 deletions(-)

kernel/sched/core.c (+4):
···
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
 				      lockdep_is_held(&task_rq(p)->lock)));
 #endif
+	/*
+	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
+	 */
+	WARN_ON_ONCE(!cpu_online(new_cpu));
 #endif
 
 	trace_sched_migrate_task(p, new_cpu);
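For readers who don't have the macro in their head: WARN_ON_ONCE() evaluates its condition and, the first time it is true at a given call site, prints a warning with a stack trace, then stays silent on later hits. Below is a minimal userspace approximation of that once-per-callsite behaviour; the macro name and the printing are simplified stand-ins, not the kernel implementation (which also taints the kernel and dumps registers).

#include <stdbool.h>
#include <stdio.h>

/*
 * Rough userspace sketch of WARN_ON_ONCE() semantics: evaluate the
 * condition, complain the first time it is true at this call site,
 * stay quiet afterwards, and hand the condition back to the caller.
 * Uses a GCC statement expression, as the kernel macro does.
 */
#define warn_on_once(cond)						\
({									\
	static bool __warned;						\
	bool __ret = !!(cond);						\
	if (__ret && !__warned) {					\
		__warned = true;					\
		fprintf(stderr, "WARNING at %s:%d: %s\n",		\
			__FILE__, __LINE__, #cond);			\
	}								\
	__ret;								\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_on_once(i >= 0);	/* warns only on the first pass */
	return 0;
}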
kernel/sched/debug.c (+5):
···
 	.release	= single_release,
 };
 
+__read_mostly bool sched_debug_enabled;
+
 static __init int sched_init_debug(void)
 {
 	debugfs_create_file("sched_features", 0644, NULL, NULL,
 			&sched_feat_fops);
+
+	debugfs_create_bool("sched_debug", 0644, NULL,
+			&sched_debug_enabled);
 
 	return 0;
 }
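The knob itself is just a bool exposed through debugfs; with a NULL parent the file lands in the debugfs root, so on a typical mount it should appear as /sys/kernel/debug/sched_debug. Judging by the topology.c hunk below, the point is to let a flag that used to be boot-time-only be toggled after boot. Here is a minimal sketch of the same pattern in a standalone module; the "example" names and directory are made up for illustration, and the return value of debugfs_create_bool() is deliberately ignored so the sketch does not depend on which kernel version's signature is in use.

#include <linux/module.h>
#include <linux/debugfs.h>

/* Directory and flag are illustrative, not part of the patch above. */
static struct dentry *example_dir;
static bool example_enabled;

static int __init example_init(void)
{
	/* Creates /sys/kernel/debug/example/enabled, writable by root. */
	example_dir = debugfs_create_dir("example", NULL);
	debugfs_create_bool("enabled", 0644, example_dir, &example_enabled);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Reads on such a file report Y or N and writes accept the usual boolean spellings, so something like "echo 1 > /sys/kernel/debug/sched_debug" flips the flag at runtime.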
kernel/sched/fair.c (+13):
···
 	this_rq->idle_stamp = rq_clock(this_rq);
 
 	/*
+	 * Do not pull tasks towards !active CPUs...
+	 */
+	if (!cpu_active(this_cpu))
+		return 0;
+
+	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
 	 * for load-balance and preemption/IRQs are still disabled avoiding
 	 * further scheduler activity on it and we're being very careful to
···
 	struct rq_flags rf;
 	rq_lock_irq(busiest_rq, &rf);
+	/*
+	 * Between queueing the stop-work and running it is a hole in which
+	 * CPUs can become inactive. We should not move tasks from or to
+	 * inactive CPUs.
+	 */
+	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
+		goto out_unlock;
 
 	/* make sure the requested cpu hasn't gone down in the meantime */
 	if (unlikely(busiest_cpu != smp_processor_id() ||
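Both hunks are the same idea: by the time the balancing code actually runs, the CPU-active state it was planned against may have changed, so the state is rechecked at the point of use (under the rq lock in the stop-work case) and the operation quietly bails out. A standalone userspace sketch of that "recheck after the queue/run gap" pattern follows, with entirely made-up names and a pthread mutex standing in for the rq lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Shared state, protected by 'lock'; stands in for cpu_active(). */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool target_active = true;

/* Deferred work, queued back when the target still looked active. */
static void *deferred_work(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&lock);
	/*
	 * Between queueing this work and running it there is a hole in
	 * which the target can go away, so revalidate under the lock
	 * instead of trusting the state seen at queueing time.
	 */
	if (!target_active) {
		printf("target went inactive, bailing out\n");
		goto out_unlock;
	}
	printf("moving work to the target\n");
out_unlock:
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	/* Decision point: the target looks active, so the work gets queued. */
	printf("queueing work, target_active=%d\n", target_active);

	/* The hole: the target goes inactive before the work runs. */
	pthread_mutex_lock(&lock);
	target_active = false;
	pthread_mutex_unlock(&lock);

	pthread_create(&worker, NULL, deferred_work, NULL);
	pthread_join(worker, NULL);
	return 0;
}

Build with cc -pthread. The worker reliably sees target_active == false because the flag is flipped before the thread is created, mirroring in slow motion the window between queueing the stop-work and the stopper actually running it.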
kernel/sched/sched.h (+2):
···
 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
 
 #ifdef CONFIG_SCHED_DEBUG
+extern bool sched_debug_enabled;
+
 extern void print_cfs_stats(struct seq_file *m, int cpu);
 extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void print_dl_stats(struct seq_file *m, int cpu);
kernel/sched/topology.c (+1, -3):
···
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static __read_mostly int sched_debug_enabled;
-
 static int __init sched_debug_setup(char *str)
 {
-	sched_debug_enabled = 1;
+	sched_debug_enabled = true;
 
 	return 0;
 }