Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpuset: Fix cpuset_cpus_allowed_fallback(), don't update tsk->rt.nr_cpus_allowed

The rule is that we have to update tsk->rt.nr_cpus_allowed whenever we change
tsk->cpus_allowed. Otherwise the RT scheduler may get confused.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/4DD4B3FA.5060901@jp.fujitsu.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>

authored by

KOSAKI Motohiro and committed by
Ingo Molnar
1e1b6c51 1e876231

+24 -12
+1 -1
include/linux/cpuset.h
··· 146 146 147 147 static inline int cpuset_cpus_allowed_fallback(struct task_struct *p) 148 148 { 149 - cpumask_copy(&p->cpus_allowed, cpu_possible_mask); 149 + do_set_cpus_allowed(p, cpu_possible_mask); 150 150 return cpumask_any(cpu_active_mask); 151 151 } 152 152
+7
include/linux/sched.h
··· 1841 1841 #endif 1842 1842 1843 1843 #ifdef CONFIG_SMP 1844 + extern void do_set_cpus_allowed(struct task_struct *p, 1845 + const struct cpumask *new_mask); 1846 + 1844 1847 extern int set_cpus_allowed_ptr(struct task_struct *p, 1845 1848 const struct cpumask *new_mask); 1846 1849 #else 1850 + static inline void do_set_cpus_allowed(struct task_struct *p, 1851 + const struct cpumask *new_mask) 1852 + { 1853 + } 1847 1854 static inline int set_cpus_allowed_ptr(struct task_struct *p, 1848 1855 const struct cpumask *new_mask) 1849 1856 {
+2 -2
kernel/cpuset.c
··· 2190 2190 rcu_read_lock(); 2191 2191 cs = task_cs(tsk); 2192 2192 if (cs) 2193 - cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed); 2193 + do_set_cpus_allowed(tsk, cs->cpus_allowed); 2194 2194 rcu_read_unlock(); 2195 2195 2196 2196 /* ··· 2217 2217 * Like above we can temporary set any mask and rely on 2218 2218 * set_cpus_allowed_ptr() as synchronization point. 2219 2219 */ 2220 - cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask); 2220 + do_set_cpus_allowed(tsk, cpu_possible_mask); 2221 2221 cpu = cpumask_any(cpu_active_mask); 2222 2222 } 2223 2223
+2 -2
kernel/kthread.c
··· 202 202 return; 203 203 } 204 204 205 - p->cpus_allowed = cpumask_of_cpu(cpu); 206 - p->rt.nr_cpus_allowed = 1; 205 + /* It's safe because the task is inactive. */ 206 + do_set_cpus_allowed(p, cpumask_of(cpu)); 207 207 p->flags |= PF_THREAD_BOUND; 208 208 } 209 209 EXPORT_SYMBOL(kthread_bind);
+12 -7
kernel/sched.c
··· 5860 5860 idle->state = TASK_RUNNING; 5861 5861 idle->se.exec_start = sched_clock(); 5862 5862 5863 - cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); 5863 + do_set_cpus_allowed(idle, cpumask_of(cpu)); 5864 5864 /* 5865 5865 * We're having a chicken and egg problem, even though we are 5866 5866 * holding rq->lock, the cpu isn't yet set to this cpu so the ··· 5948 5948 } 5949 5949 5950 5950 #ifdef CONFIG_SMP 5951 + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 5952 + { 5953 + if (p->sched_class && p->sched_class->set_cpus_allowed) 5954 + p->sched_class->set_cpus_allowed(p, new_mask); 5955 + else { 5956 + cpumask_copy(&p->cpus_allowed, new_mask); 5957 + p->rt.nr_cpus_allowed = cpumask_weight(new_mask); 5958 + } 5959 + } 5960 + 5951 5961 /* 5952 5962 * This is how migration works: 5953 5963 * ··· 6003 5993 goto out; 6004 5994 } 6005 5995 6006 - if (p->sched_class->set_cpus_allowed) 6007 - p->sched_class->set_cpus_allowed(p, new_mask); 6008 - else { 6009 - cpumask_copy(&p->cpus_allowed, new_mask); 6010 - p->rt.nr_cpus_allowed = cpumask_weight(new_mask); 6011 - } 5996 + do_set_cpus_allowed(p, new_mask); 6012 5997 6013 5998 /* Can the task run on the task's current CPU? If so, we're done */ 6014 5999 if (cpumask_test_cpu(task_cpu(p), new_mask))