sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs

Cpusets vs. suspend-resume is _completely_ broken. It only got noticed
because it now results in non-cpuset usage breaking too.

On suspend, cpuset_cpu_inactive() doesn't call into
cpuset_update_active_cpus() because it doesn't want to move tasks about;
there is no need, since all tasks are frozen and won't run again until
after we've resumed everything.
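
A minimal sketch of the suspend-side hotplug callback, for reference
(not part of this patch; exact details in kernel/sched/core.c may
differ):

	static int cpuset_cpu_inactive(unsigned int cpu)
	{
		if (!cpuhp_tasks_frozen) {
			/* normal hotplug: let cpusets rebuild the domains */
			cpuset_update_active_cpus();
		} else {
			/* suspend: just collapse to a single sched domain */
			num_cpus_frozen++;
			partition_sched_domains(1, NULL, NULL);
		}
		return 0;
	}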

But this means that when we finally do call into
cpuset_update_active_cpus() after resuming the last frozen CPU in
cpuset_cpu_active(), the top_cpuset will not differ from the
cpu_active_mask at all, and thus it will not in fact do _anything_.
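
Concretely, the hotplug work only rebuilds the sched domains when it
sees the top cpuset's CPU mask change; a minimal sketch of the
pre-patch check in cpuset_hotplug_workfn() (assuming the usual
new_cpus/cpus_updated naming, not shown in full in the hunks below):

	cpumask_copy(&new_cpus, cpu_active_mask);
	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);

	/* ... */

	/* after resume the masks match again, so nothing is rebuilt */
	if (cpus_updated)
		rebuild_sched_domains();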

So the cpuset configuration will not be restored. This was largely
hidden because we would unconditionally create identity domains and
mobile users don't in fact use cpusets much. And servers that do use
cpusets tend not to suspend-resume much.

An additional problem is that we'd not in fact wait for the cpuset work
to finish before resuming the tasks, allowing spurious migrations
outside of the specified domains.

Fix the rebuild by introducing cpuset_force_rebuild() and fix the
ordering with cpuset_wait_for_hotplug().
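
The resulting resume flow, pieced together from the hunks below
(sketch, context trimmed):

	/* kernel/sched/core.c: CPUs coming back up during resume */
	if (cpuhp_tasks_frozen) {
		partition_sched_domains(1, NULL, NULL);
		if (--num_cpus_frozen)
			return;
		/* last CPU: make the hotplug work rebuild unconditionally */
		cpuset_force_rebuild();
	}
	cpuset_update_active_cpus();	/* queues cpuset_hotplug_work */

	/* kernel/power/process.c: thaw_processes(), before unfreezing */
	cpuset_wait_for_hotplug();	/* flush_work(&cpuset_hotplug_work) */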

Reported-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <stable@vger.kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: deb7aa308ea2 ("cpuset: reorganize CPU / memory hotplug handling")
Link: http://lkml.kernel.org/r/20170907091338.orwxrqkbfkki3c24@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>

4 files changed, 28 insertions(+), 6 deletions(-)

include/linux/cpuset.h (+6)

···
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
···
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_force_rebuild(void) { }
+
 static inline void cpuset_update_active_cpus(void)
 {
 	partition_sched_domains(1, NULL, NULL);
 }
+
+static inline void cpuset_wait_for_hotplug(void) { }
 
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
···
kernel/cgroup/cpuset.c (+15 -1)

···
 	mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+	force_rebuild = true;
+}
+
 /**
  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
···
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated)
+	if (cpus_updated || force_rebuild) {
+		force_rebuild = false;
 		rebuild_sched_domains();
+	}
 }
 
 void cpuset_update_active_cpus(void)
···
 	 * to a work item to avoid reverse locking order.
 	 */
 	schedule_work(&cpuset_hotplug_work);
+}
+
+void cpuset_wait_for_hotplug(void)
+{
+	flush_work(&cpuset_hotplug_work);
 }
 
 /*
···
kernel/power/process.c (+4 -1)

···
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/cpuset.h>
 
-/* 
+/*
  * Timeout for stopping processes
  */
 unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
···
 
 	__usermodehelper_set_disable_depth(UMH_FREEZING);
 	thaw_workqueues();
+
+	cpuset_wait_for_hotplug();
 
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
···
kernel/sched/core.c (+3 -4)

···
 		 * operation in the resume sequence, just build a single sched
 		 * domain, ignoring cpusets.
 		 */
-		num_cpus_frozen--;
-		if (likely(num_cpus_frozen)) {
-			partition_sched_domains(1, NULL, NULL);
+		partition_sched_domains(1, NULL, NULL);
+		if (--num_cpus_frozen)
 			return;
-		}
 		/*
 		 * This is the last CPU online operation. So fall through and
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
+		cpuset_force_rebuild();
 	}
 	cpuset_update_active_cpus();
 }
···