···
  * 2003-10-22 Updates by Stephen Hemminger.
  * 2004 May-July Rework by Paul Jackson.
  * 2006 Rework by Paul Menage to use generic cgroups
+ * 2008 Rework of the scheduler domains and CPU hotplug handling
+ *      by Max Krasnyansky
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of the Linux
···
 
 static DEFINE_MUTEX(callback_mutex);
 
-/* This is ugly, but preserves the userspace API for existing cpuset
+/*
+ * This is ugly, but preserves the userspace API for existing cpuset
  * users. If someone tries to mount the "cpuset" filesystem, we
- * silently switch it to mount "cgroup" instead */
+ * silently switch it to mount "cgroup" instead
+ */
 static int cpuset_get_sb(struct file_system_type *fs_type,
                          int flags, const char *unused_dev_name,
                          void *data, struct vfsmount *mnt)
···
 }
 
 /*
- * Helper routine for rebuild_sched_domains().
+ * Helper routine for generate_sched_domains().
  * Do cpusets a, b have overlapping cpus_allowed masks?
  */
-
 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 {
         return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
···
 }
 
 /*
- * rebuild_sched_domains()
+ * generate_sched_domains()
  *
- * This routine will be called to rebuild the scheduler's dynamic
- * sched domains:
- * - if the flag 'sched_load_balance' of any cpuset with non-empty
- *   'cpus' changes,
- * - or if the 'cpus' allowed changes in any cpuset which has that
- *   flag enabled,
- * - or if the 'sched_relax_domain_level' of any cpuset which has
- *   that flag enabled and with non-empty 'cpus' changes,
- * - or if any cpuset with non-empty 'cpus' is removed,
- * - or if a cpu gets offlined.
- *
- * This routine builds a partial partition of the systems CPUs
- * (the set of non-overlappping cpumask_t's in the array 'part'
- * below), and passes that partial partition to the kernel/sched.c
- * partition_sched_domains() routine, which will rebuild the
- * schedulers load balancing domains (sched domains) as specified
- * by that partial partition.  A 'partial partition' is a set of
- * non-overlapping subsets whose union is a subset of that set.
+ * This function builds a partial partition of the systems CPUs
+ * A 'partial partition' is a set of non-overlapping subsets whose
+ * union is a subset of that set.
+ * The output of this function needs to be passed to kernel/sched.c
+ * partition_sched_domains() routine, which will rebuild the scheduler's
+ * load balancing domains (sched domains) as specified by that partial
+ * partition.
  *
  * See "What is sched_load_balance" in Documentation/cpusets.txt
  * for a background explanation of this.
···
  * domains when operating in the severe memory shortage situations
  * that could cause allocation failures below.
  *
- * Call with cgroup_mutex held.  May take callback_mutex during
- * call due to the kfifo_alloc() and kmalloc() calls.  May nest
- * a call to the get_online_cpus()/put_online_cpus() pair.
- * Must not be called holding callback_mutex, because we must not
- * call get_online_cpus() while holding callback_mutex.  Elsewhere
- * the kernel nests callback_mutex inside get_online_cpus() calls.
- * So the reverse nesting would risk an ABBA deadlock.
+ * Must be called with cgroup_lock held.
  *
  * The three key local variables below are:
  * q  - a linked-list queue of cpuset pointers, used to implement a
···
  * element of the partition (one sched domain) to be passed to
  * partition_sched_domains().
  */
-
-void rebuild_sched_domains(void)
+static int generate_sched_domains(cpumask_t **domains,
+                        struct sched_domain_attr **attributes)
 {
-        LIST_HEAD(q);           /* queue of cpusets to be scanned*/
+        LIST_HEAD(q);           /* queue of cpusets to be scanned */
         struct cpuset *cp;      /* scans q */
         struct cpuset **csa;    /* array of all cpuset ptrs */
         int csn;                /* how many cpuset ptrs in csa so far */
···
         int ndoms;              /* number of sched domains in result */
         int nslot;              /* next empty doms[] cpumask_t slot */
 
-        csa = NULL;
+        ndoms = 0;
         doms = NULL;
         dattr = NULL;
+        csa = NULL;
 
         /* Special case for the 99% of systems with one, full, sched domain */
         if (is_sched_load_balance(&top_cpuset)) {
-                ndoms = 1;
                 doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
                 if (!doms)
-                        goto rebuild;
+                        goto done;
+
                 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
                 if (dattr) {
                         *dattr = SD_ATTR_INIT;
                         update_domain_attr_tree(dattr, &top_cpuset);
                 }
                 *doms = top_cpuset.cpus_allowed;
-                goto rebuild;
+
+                ndoms = 1;
+                goto done;
         }
 
         csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
···
                 }
         }
 
-        /* Convert <csn, csa> to <ndoms, doms> */
+        /*
+         * Now we know how many domains to create.
+         * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
+         */
         doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
-        if (!doms)
-                goto rebuild;
+        if (!doms) {
+                ndoms = 0;
+                goto done;
+        }
+
+        /*
+         * The rest of the code, including the scheduler, can deal with
+         * dattr==NULL case. No need to abort if alloc fails.
+         */
         dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
 
         for (nslot = 0, i = 0; i < csn; i++) {
                 struct cpuset *a = csa[i];
+                cpumask_t *dp;
                 int apn = a->pn;
 
-                if (apn >= 0) {
-                        cpumask_t *dp = doms + nslot;
-
-                        if (nslot == ndoms) {
-                                static int warnings = 10;
-                                if (warnings) {
-                                        printk(KERN_WARNING
-                                         "rebuild_sched_domains confused:"
-                                          " nslot %d, ndoms %d, csn %d, i %d,"
-                                          " apn %d\n",
-                                          nslot, ndoms, csn, i, apn);
-                                        warnings--;
-                                }
-                                continue;
-                        }
-
-                        cpus_clear(*dp);
-                        if (dattr)
-                                *(dattr + nslot) = SD_ATTR_INIT;
-                        for (j = i; j < csn; j++) {
-                                struct cpuset *b = csa[j];
-
-                                if (apn == b->pn) {
-                                        cpus_or(*dp, *dp, b->cpus_allowed);
-                                        b->pn = -1;
-                                        if (dattr)
-                                                update_domain_attr_tree(dattr
-                                                                + nslot, b);
-                                }
-                        }
-                        nslot++;
+                if (apn < 0) {
+                        /* Skip completed partitions */
+                        continue;
                 }
+
+                dp = doms + nslot;
+
+                if (nslot == ndoms) {
+                        static int warnings = 10;
+                        if (warnings) {
+                                printk(KERN_WARNING
+                                 "rebuild_sched_domains confused:"
+                                  " nslot %d, ndoms %d, csn %d, i %d,"
+                                  " apn %d\n",
+                                  nslot, ndoms, csn, i, apn);
+                                warnings--;
+                        }
+                        continue;
+                }
+
+                cpus_clear(*dp);
+                if (dattr)
+                        *(dattr + nslot) = SD_ATTR_INIT;
+                for (j = i; j < csn; j++) {
+                        struct cpuset *b = csa[j];
+
+                        if (apn == b->pn) {
+                                cpus_or(*dp, *dp, b->cpus_allowed);
+                                if (dattr)
+                                        update_domain_attr_tree(dattr + nslot, b);
+
+                                /* Done with this partition */
+                                b->pn = -1;
+                        }
+                }
+                nslot++;
         }
         BUG_ON(nslot != ndoms);
 
-rebuild:
-        /* Have scheduler rebuild sched domains */
-        get_online_cpus();
-        partition_sched_domains(ndoms, doms, dattr);
-        put_online_cpus();
-
 done:
         kfree(csa);
-        /* Don't kfree(doms) -- partition_sched_domains() does that. */
-        /* Don't kfree(dattr) -- partition_sched_domains() does that. */
+
+        *domains    = doms;
+        *attributes = dattr;
+        return ndoms;
+}
+
+/*
+ * Rebuild scheduler domains.
+ *
+ * Call with neither cgroup_mutex held nor within get_online_cpus().
+ * Takes both cgroup_mutex and get_online_cpus().
+ *
+ * Cannot be directly called from cpuset code handling changes
+ * to the cpuset pseudo-filesystem, because it cannot be called
+ * from code that already holds cgroup_mutex.
+ */
+static void do_rebuild_sched_domains(struct work_struct *unused)
+{
+        struct sched_domain_attr *attr;
+        cpumask_t *doms;
+        int ndoms;
+
+        get_online_cpus();
+
+        /* Generate domain masks and attrs */
+        cgroup_lock();
+        ndoms = generate_sched_domains(&doms, &attr);
+        cgroup_unlock();
+
+        /* Have scheduler rebuild the domains */
+        partition_sched_domains(ndoms, doms, attr);
+
+        put_online_cpus();
+}
+
+static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
+
+/*
+ * Rebuild scheduler domains, asynchronously via workqueue.
+ *
+ * If the flag 'sched_load_balance' of any cpuset with non-empty
+ * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
+ * which has that flag enabled, or if any cpuset with a non-empty
+ * 'cpus' is removed, then call this routine to rebuild the
+ * scheduler's dynamic sched domains.
+ *
+ * The rebuild_sched_domains() and partition_sched_domains()
+ * routines must nest cgroup_lock() inside get_online_cpus(),
+ * but such cpuset changes as these must nest that locking the
+ * other way, holding cgroup_lock() for much of the code.
+ *
+ * So in order to avoid an ABBA deadlock, the cpuset code handling
+ * these user changes delegates the actual sched domain rebuilding
+ * to a separate workqueue thread, which ends up processing the
+ * above do_rebuild_sched_domains() function.
+ */
+static void async_rebuild_sched_domains(void)
+{
+        schedule_work(&rebuild_sched_domains_work);
+}
+
+/*
+ * Accomplishes the same scheduler domain rebuild as the above
+ * async_rebuild_sched_domains(), however it directly calls the
+ * rebuild routine synchronously rather than calling it via an
+ * asynchronous work thread.
+ *
+ * This can only be called from code that is not holding
+ * cgroup_mutex (not nested in a cgroup_lock() call.)
+ */
+void rebuild_sched_domains(void)
+{
+        do_rebuild_sched_domains(NULL);
 }
 
 /**
···
         return retval;
 
         if (is_load_balanced)
-                rebuild_sched_domains();
+                async_rebuild_sched_domains();
         return 0;
 }
···
         if (val != cs->relax_domain_level) {
                 cs->relax_domain_level = val;
                 if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
-                        rebuild_sched_domains();
+                        async_rebuild_sched_domains();
         }
 
         return 0;
···
         mutex_unlock(&callback_mutex);
 
         if (cpus_nonempty && balance_flag_changed)
-                rebuild_sched_domains();
+                async_rebuild_sched_domains();
 
         return 0;
 }
···
         default:
                 BUG();
         }
+
+        /* Unreachable but makes gcc happy */
+        return 0;
 }
 
 static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
···
         default:
                 BUG();
         }
+
+        /* Unrechable but makes gcc happy */
+        return 0;
 }
 
···
 }
 
 /*
- * Locking note on the strange update_flag() call below:
- *
  * If the cpuset being removed has its flag 'sched_load_balance'
  * enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains().  The get_online_cpus()
- * call in rebuild_sched_domains() must not be made while holding
- * callback_mutex.  Elsewhere the kernel nests callback_mutex inside
- * get_online_cpus() calls.  So the reverse nesting would risk an
- * ABBA deadlock.
+ * will call async_rebuild_sched_domains().
  */
 
 static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
···
 struct cgroup_subsys cpuset_subsys = {
         .name = "cpuset",
         .create = cpuset_create,
-        .destroy  = cpuset_destroy,
+        .destroy = cpuset_destroy,
         .can_attach = cpuset_can_attach,
         .attach = cpuset_attach,
         .populate = cpuset_populate,
···
 }
 
 /*
- * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
+ * If CPU and/or memory hotplug handlers, below, unplug any CPUs
  * or memory nodes, we need to walk over the cpuset hierarchy,
  * removing that CPU or node from all cpusets.  If this removes the
  * last CPU or node from a cpuset, then move the tasks in the empty
···
 }
 
 /*
- * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
- * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to
- * track what's online after any CPU or memory node hotplug or unplug event.
- *
- * Since there are two callers of this routine, one for CPU hotplug
- * events and one for memory node hotplug events, we could have coded
- * two separate routines here.  We code it as a single common routine
- * in order to minimize text size.
- */
-
-static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
-{
-        cgroup_lock();
-
-        top_cpuset.cpus_allowed = cpu_online_map;
-        top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
-        scan_for_empty_cpusets(&top_cpuset);
-
-        /*
-         * Scheduler destroys domains on hotplug events.
-         * Rebuild them based on the current settings.
-         */
-        if (rebuild_sd)
-                rebuild_sched_domains();
-
-        cgroup_unlock();
-}
-
-/*
  * The top_cpuset tracks what CPUs and Memory Nodes are online,
  * period.  This is necessary in order to make cpusets transparent
  * (of no affect) on systems that are actively using CPU hotplug
···
  *
  * This routine ensures that top_cpuset.cpus_allowed tracks
  * cpu_online_map on each CPU hotplug (cpuhp) event.
+ *
+ * Called within get_online_cpus().  Needs to call cgroup_lock()
+ * before calling generate_sched_domains().
  */
-
-static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
+static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
                                 unsigned long phase, void *unused_cpu)
 {
+        struct sched_domain_attr *attr;
+        cpumask_t *doms;
+        int ndoms;
+
         switch (phase) {
-        case CPU_UP_CANCELED:
-        case CPU_UP_CANCELED_FROZEN:
-        case CPU_DOWN_FAILED:
-        case CPU_DOWN_FAILED_FROZEN:
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
-                common_cpu_mem_hotplug_unplug(1);
                 break;
+
         default:
                 return NOTIFY_DONE;
         }
+
+        cgroup_lock();
+        top_cpuset.cpus_allowed = cpu_online_map;
+        scan_for_empty_cpusets(&top_cpuset);
+        ndoms = generate_sched_domains(&doms, &attr);
+        cgroup_unlock();
+
+        /* Have scheduler rebuild the domains */
+        partition_sched_domains(ndoms, doms, attr);
 
         return NOTIFY_OK;
 }
···
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
- * Call this routine anytime after you change
- * node_states[N_HIGH_MEMORY].
- * See also the previous routine cpuset_handle_cpuhp().
+ * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
+ * See also the previous routine cpuset_track_online_cpus().
  */
-
 void cpuset_track_online_nodes(void)
 {
-        common_cpu_mem_hotplug_unplug(0);
+        cgroup_lock();
+        top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+        scan_for_empty_cpusets(&top_cpuset);
+        cgroup_unlock();
 }
 #endif
···
         top_cpuset.cpus_allowed = cpu_online_map;
         top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
-        hotcpu_notifier(cpuset_handle_cpuhp, 0);
+        hotcpu_notifier(cpuset_track_online_cpus, 0);
 }
 
 /**
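
The core of the cpuset.c rework above is a lock-ordering rule: the rebuild path must take get_online_cpus() before cgroup_lock(), while the cpuset write paths already hold cgroup_lock(), so they hand the rebuild off to a workqueue (async_rebuild_sched_domains()) instead of calling it inline. Below is a minimal userspace sketch of that deferral pattern, not kernel code: pthread mutexes stand in for the two kernel locks, a plain thread stands in for the workqueue item, and all names are illustrative.

/*
 * Userspace sketch only: "hotplug_lock" plays the role of get_online_cpus()
 * and "cgroup_lock" the role of cgroup_mutex.  The rebuild path is the only
 * place that takes both locks, always in the same order, so no ABBA deadlock
 * is possible.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cgroup_lock  = PTHREAD_MUTEX_INITIALIZER;

/* The deferred rebuild: hotplug_lock first, then cgroup_lock. */
static void *rebuild_worker(void *unused)
{
        pthread_mutex_lock(&hotplug_lock);
        pthread_mutex_lock(&cgroup_lock);
        puts("rebuilding sched domains (hotplug_lock -> cgroup_lock)");
        pthread_mutex_unlock(&cgroup_lock);
        pthread_mutex_unlock(&hotplug_lock);
        return NULL;
}

/*
 * A cpuset write path: it already holds cgroup_lock, so it must not take
 * hotplug_lock here; it defers the rebuild to the worker instead.
 */
static void cpuset_write_path(pthread_t *worker)
{
        pthread_mutex_lock(&cgroup_lock);
        puts("updating cpuset under cgroup_lock; deferring rebuild");
        pthread_create(worker, NULL, rebuild_worker, NULL);
        pthread_mutex_unlock(&cgroup_lock);
}

int main(void)
{
        pthread_t worker;

        cpuset_write_path(&worker);
        pthread_join(worker, NULL);
        return 0;
}

The write path never acquires the two locks in the reverse order, which is exactly the property the workqueue indirection buys the kernel code.
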
kernel/sched.c (+13, -6)
···
  * and partition_sched_domains() will fallback to the single partition
  * 'fallback_doms', it also forces the domains to be rebuilt.
  *
+ * If doms_new==NULL it will be replaced with cpu_online_map.
+ * ndoms_new==0 is a special case for destroying existing domains.
+ * It will not create the default domain.
+ *
  * Call with hotplug lock held
  */
 void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
                              struct sched_domain_attr *dattr_new)
 {
-        int i, j;
+        int i, j, n;
 
         mutex_lock(&sched_domains_mutex);
 
         /* always unregister in case we don't destroy any domains */
         unregister_sched_domain_sysctl();
 
-        if (doms_new == NULL)
-                ndoms_new = 0;
+        n = doms_new ? ndoms_new : 0;
 
         /* Destroy deleted domains */
         for (i = 0; i < ndoms_cur; i++) {
-                for (j = 0; j < ndoms_new; j++) {
+                for (j = 0; j < n; j++) {
                         if (cpus_equal(doms_cur[i], doms_new[j])
                             && dattrs_equal(dattr_cur, i, dattr_new, j))
                                 goto match1;
···
 
         if (doms_new == NULL) {
                 ndoms_cur = 0;
-                ndoms_new = 1;
                 doms_new = &fallback_doms;
                 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
                 dattr_new = NULL;
···
 int arch_reinit_sched_domains(void)
 {
         get_online_cpus();
+
+        /* Destroy domains first to force the rebuild */
+        partition_sched_domains(0, NULL, NULL);
+
         rebuild_sched_domains();
         put_online_cpus();
+
         return 0;
 }
 
···
         case CPU_ONLINE_FROZEN:
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
-                partition_sched_domains(0, NULL, NULL);
+                partition_sched_domains(1, NULL, NULL);
                 return NOTIFY_OK;
 
         default:
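
The kernel/sched.c hunks above change what the arguments to partition_sched_domains() mean to callers: ndoms_new==0 with doms_new==NULL now only tears down the existing domains, while a nonzero ndoms_new with doms_new==NULL falls back to the single default domain built from cpu_online_map minus cpu_isolated_map. The stand-alone snippet below merely restates that dispatch with a stub cpumask type so the three caller cases are explicit; it is illustrative only, not the kernel implementation.

/*
 * Illustrative restatement of the argument handling after this patch,
 * using a stand-in type.  Not kernel code.
 */
#include <stdio.h>

struct cpumask_stub { unsigned long bits; };    /* stand-in for cpumask_t */

static void interpret_partition_args(int ndoms_new,
                                     const struct cpumask_stub *doms_new)
{
        /* n = how many caller-supplied domains are compared/registered */
        int n = doms_new ? ndoms_new : 0;

        if (doms_new == NULL && ndoms_new == 0)
                puts("destroy existing domains; do not create the default domain");
        else if (doms_new == NULL)
                puts("fall back to one default domain (online CPUs minus isolated CPUs)");
        else
                printf("rebuild using the caller's %d non-overlapping domain(s)\n", n);
}

int main(void)
{
        struct cpumask_stub one = { 0xf };

        interpret_partition_args(0, NULL);      /* arch_reinit_sched_domains() pre-destroy */
        interpret_partition_args(1, NULL);      /* CPU hotplug notifier path */
        interpret_partition_args(1, &one);      /* cpuset-generated partition */
        return 0;
}
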