cgroup/cpuset: Eliminate unnecessary sched domains rebuilds in hotplug

It was found that some hotplug operations may cause multiple
rebuild_sched_domains_locked() calls. Some of those intermediate calls
may use cpuset states not in the final correct form leading to incorrect
sched domain setting.

Fix this problem by using the existing force_rebuild flag to inhibit
immediate rebuild_sched_domains_locked() calls if set and only doing
one final call at the end. Also renaming the force_rebuild flag to
force_sd_rebuild to make its meaning more clear.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

authored by Waiman Long and committed by Tejun Heo ff0ce721 311a1bdc

+17 -16
+17 -16
kernel/cgroup/cpuset.c
··· 233 233 static struct list_head remote_children; 234 234 235 235 /* 236 + * A flag to force sched domain rebuild at the end of an operation while 237 + * inhibiting it in the intermediate stages when set. Currently it is only 238 + * set in hotplug code. 239 + */ 240 + static bool force_sd_rebuild; 241 + 242 + /* 236 243 * Partition root states: 237 244 * 238 245 * 0 - member (not a partition root) ··· 1482 1475 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1483 1476 } 1484 1477 1485 - if (rebuild_domains) 1478 + if (rebuild_domains && !force_sd_rebuild) 1486 1479 rebuild_sched_domains_locked(); 1487 1480 } 1488 1481 ··· 1840 1833 remote_partition_disable(child, tmp); 1841 1834 disable_cnt++; 1842 1835 } 1843 - if (disable_cnt) 1836 + if (disable_cnt && !force_sd_rebuild) 1844 1837 rebuild_sched_domains_locked(); 1845 1838 } 1846 1839 ··· 2449 2442 } 2450 2443 rcu_read_unlock(); 2451 2444 2452 - if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD)) 2445 + if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) && 2446 + !force_sd_rebuild) 2453 2447 rebuild_sched_domains_locked(); 2454 2448 } 2455 2449 ··· 3112 3104 cs->flags = trialcs->flags; 3113 3105 spin_unlock_irq(&callback_lock); 3114 3106 3115 - if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 3107 + if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed && 3108 + !force_sd_rebuild) 3116 3109 rebuild_sched_domains_locked(); 3117 3110 3118 3111 if (spread_flag_changed) ··· 4510 4501 update_tasks_nodemask(cs); 4511 4502 } 4512 4503 4513 - static bool force_rebuild; 4514 - 4515 4504 void cpuset_force_rebuild(void) 4516 4505 { 4517 - force_rebuild = true; 4506 + force_sd_rebuild = true; 4518 4507 } 4519 4508 4520 4509 /** ··· 4660 4653 !cpumask_empty(subpartitions_cpus); 4661 4654 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); 4662 4655 4663 - /* 4664 - * In the rare case that hotplug removes all the cpus in 4665 - * subpartitions_cpus, we 
assumed that cpus are updated. 4666 - */ 4667 - if (!cpus_updated && !cpumask_empty(subpartitions_cpus)) 4668 - cpus_updated = true; 4669 - 4670 4656 /* For v1, synchronize cpus_allowed to cpu_active_mask */ 4671 4657 if (cpus_updated) { 4658 + cpuset_force_rebuild(); 4672 4659 spin_lock_irq(&callback_lock); 4673 4660 if (!on_dfl) 4674 4661 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); ··· 4718 4717 } 4719 4718 4720 4719 /* rebuild sched domains if cpus_allowed has changed */ 4721 - if (cpus_updated || force_rebuild) { 4722 - force_rebuild = false; 4720 + if (force_sd_rebuild) { 4721 + force_sd_rebuild = false; 4723 4722 rebuild_sched_domains_cpuslocked(); 4724 4723 } 4725 4724