Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'cgroup-for-6.11-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup fixes from Tejun Heo:
"Three patches addressing cpuset corner cases"

* tag 'cgroup-for-6.11-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
cgroup/cpuset: Eliminate unncessary sched domains rebuilds in hotplug
cgroup/cpuset: Clear effective_xcpus on cpus_allowed clearing only if cpus.exclusive not set
cgroup/cpuset: fix panic caused by partcmd_update

+21 -17
+21 -17
kernel/cgroup/cpuset.c
··· 233 233 static struct list_head remote_children; 234 234 235 235 /* 236 + * A flag to force sched domain rebuild at the end of an operation while 237 + * inhibiting it in the intermediate stages when set. Currently it is only 238 + * set in hotplug code. 239 + */ 240 + static bool force_sd_rebuild; 241 + 242 + /* 236 243 * Partition root states: 237 244 * 238 245 * 0 - member (not a partition root) ··· 1482 1475 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1483 1476 } 1484 1477 1485 - if (rebuild_domains) 1478 + if (rebuild_domains && !force_sd_rebuild) 1486 1479 rebuild_sched_domains_locked(); 1487 1480 } 1488 1481 ··· 1840 1833 remote_partition_disable(child, tmp); 1841 1834 disable_cnt++; 1842 1835 } 1843 - if (disable_cnt) 1836 + if (disable_cnt && !force_sd_rebuild) 1844 1837 rebuild_sched_domains_locked(); 1845 1838 } 1846 1839 ··· 1998 1991 part_error = PERR_CPUSEMPTY; 1999 1992 goto write_error; 2000 1993 } 1994 + /* Check newmask again, whether cpus are available for parent/cs */ 1995 + nocpu |= tasks_nocpu_error(parent, cs, newmask); 2001 1996 2002 1997 /* 2003 1998 * partcmd_update with newmask: ··· 2449 2440 } 2450 2441 rcu_read_unlock(); 2451 2442 2452 - if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD)) 2443 + if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) && 2444 + !force_sd_rebuild) 2453 2445 rebuild_sched_domains_locked(); 2454 2446 } 2455 2447 ··· 2533 2523 */ 2534 2524 if (!*buf) { 2535 2525 cpumask_clear(trialcs->cpus_allowed); 2536 - cpumask_clear(trialcs->effective_xcpus); 2526 + if (cpumask_empty(trialcs->exclusive_cpus)) 2527 + cpumask_clear(trialcs->effective_xcpus); 2537 2528 } else { 2538 2529 retval = cpulist_parse(buf, trialcs->cpus_allowed); 2539 2530 if (retval < 0) ··· 3112 3101 cs->flags = trialcs->flags; 3113 3102 spin_unlock_irq(&callback_lock); 3114 3103 3115 - if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 3104 + if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed && 3105 + !force_sd_rebuild) 3116 3106 rebuild_sched_domains_locked(); 3117 3107 3118 3108 if (spread_flag_changed) ··· 4510 4498 update_tasks_nodemask(cs); 4511 4499 } 4512 4500 4513 - static bool force_rebuild; 4514 - 4515 4501 void cpuset_force_rebuild(void) 4516 4502 { 4517 - force_rebuild = true; 4503 + force_sd_rebuild = true; 4518 4504 } 4519 4505 4520 4506 /** ··· 4660 4650 !cpumask_empty(subpartitions_cpus); 4661 4651 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); 4662 4652 4663 - /* 4664 - * In the rare case that hotplug removes all the cpus in 4665 - * subpartitions_cpus, we assumed that cpus are updated. 4666 - */ 4667 - if (!cpus_updated && !cpumask_empty(subpartitions_cpus)) 4668 - cpus_updated = true; 4669 - 4670 4653 /* For v1, synchronize cpus_allowed to cpu_active_mask */ 4671 4654 if (cpus_updated) { 4655 + cpuset_force_rebuild(); 4672 4656 spin_lock_irq(&callback_lock); 4673 4657 if (!on_dfl) 4674 4658 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); ··· 4718 4714 } 4719 4715 4720 4716 /* rebuild sched domains if cpus_allowed has changed */ 4721 - if (cpus_updated || force_rebuild) { 4722 - force_rebuild = false; 4717 + if (force_sd_rebuild) { 4718 + force_sd_rebuild = false; 4723 4719 rebuild_sched_domains_cpuslocked(); 4724 4720 } 4725 4721