Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/core: Provide a tsk_nr_cpus_allowed() helper

tsk_nr_cpus_allowed() is an accessor for task->nr_cpus_allowed which allows
us to change the representation of ->nr_cpus_allowed if required.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1462969411-17735-2-git-send-email-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Thomas Gleixner; committed by Ingo Molnar.
50605ffb ade42e09

+32 -27
+5
include/linux/sched.h
··· 1930 1930 /* Future-safe accessor for struct task_struct's cpus_allowed. */ 1931 1931 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) 1932 1932 1933 + static inline int tsk_nr_cpus_allowed(struct task_struct *p) 1934 + { 1935 + return p->nr_cpus_allowed; 1936 + } 1937 + 1933 1938 #define TNF_MIGRATED 0x01 1934 1939 #define TNF_NO_GROUP 0x02 1935 1940 #define TNF_SHARED 0x04
+1 -1
kernel/sched/core.c
··· 1585 1585 { 1586 1586 lockdep_assert_held(&p->pi_lock); 1587 1587 1588 - if (p->nr_cpus_allowed > 1) 1588 + if (tsk_nr_cpus_allowed(p) > 1) 1589 1589 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 1590 1590 else 1591 1591 cpu = cpumask_any(tsk_cpus_allowed(p));
+14 -14
kernel/sched/deadline.c
··· 134 134 { 135 135 struct task_struct *p = dl_task_of(dl_se); 136 136 137 - if (p->nr_cpus_allowed > 1) 137 + if (tsk_nr_cpus_allowed(p) > 1) 138 138 dl_rq->dl_nr_migratory++; 139 139 140 140 update_dl_migration(dl_rq); ··· 144 144 { 145 145 struct task_struct *p = dl_task_of(dl_se); 146 146 147 - if (p->nr_cpus_allowed > 1) 147 + if (tsk_nr_cpus_allowed(p) > 1) 148 148 dl_rq->dl_nr_migratory--; 149 149 150 150 update_dl_migration(dl_rq); ··· 966 966 967 967 enqueue_dl_entity(&p->dl, pi_se, flags); 968 968 969 - if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 969 + if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) 970 970 enqueue_pushable_dl_task(rq, p); 971 971 } 972 972 ··· 1040 1040 * try to make it stay here, it might be important. 1041 1041 */ 1042 1042 if (unlikely(dl_task(curr)) && 1043 - (curr->nr_cpus_allowed < 2 || 1043 + (tsk_nr_cpus_allowed(curr) < 2 || 1044 1044 !dl_entity_preempt(&p->dl, &curr->dl)) && 1045 - (p->nr_cpus_allowed > 1)) { 1045 + (tsk_nr_cpus_allowed(p) > 1)) { 1046 1046 int target = find_later_rq(p); 1047 1047 1048 1048 if (target != -1 && ··· 1063 1063 * Current can't be migrated, useless to reschedule, 1064 1064 * let's hope p can move out. 1065 1065 */ 1066 - if (rq->curr->nr_cpus_allowed == 1 || 1066 + if (tsk_nr_cpus_allowed(rq->curr) == 1 || 1067 1067 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) 1068 1068 return; 1069 1069 ··· 1071 1071 * p is migratable, so let's not schedule it and 1072 1072 * see if it is pushed or pulled somewhere else. 
1073 1073 */ 1074 - if (p->nr_cpus_allowed != 1 && 1074 + if (tsk_nr_cpus_allowed(p) != 1 && 1075 1075 cpudl_find(&rq->rd->cpudl, p, NULL) != -1) 1076 1076 return; 1077 1077 ··· 1186 1186 { 1187 1187 update_curr_dl(rq); 1188 1188 1189 - if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) 1189 + if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1) 1190 1190 enqueue_pushable_dl_task(rq, p); 1191 1191 } 1192 1192 ··· 1287 1287 if (unlikely(!later_mask)) 1288 1288 return -1; 1289 1289 1290 - if (task->nr_cpus_allowed == 1) 1290 + if (tsk_nr_cpus_allowed(task) == 1) 1291 1291 return -1; 1292 1292 1293 1293 /* ··· 1433 1433 1434 1434 BUG_ON(rq->cpu != task_cpu(p)); 1435 1435 BUG_ON(task_current(rq, p)); 1436 - BUG_ON(p->nr_cpus_allowed <= 1); 1436 + BUG_ON(tsk_nr_cpus_allowed(p) <= 1); 1437 1437 1438 1438 BUG_ON(!task_on_rq_queued(p)); 1439 1439 BUG_ON(!dl_task(p)); ··· 1472 1472 */ 1473 1473 if (dl_task(rq->curr) && 1474 1474 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && 1475 - rq->curr->nr_cpus_allowed > 1) { 1475 + tsk_nr_cpus_allowed(rq->curr) > 1) { 1476 1476 resched_curr(rq); 1477 1477 return 0; 1478 1478 } ··· 1619 1619 { 1620 1620 if (!task_running(rq, p) && 1621 1621 !test_tsk_need_resched(rq->curr) && 1622 - p->nr_cpus_allowed > 1 && 1622 + tsk_nr_cpus_allowed(p) > 1 && 1623 1623 dl_task(rq->curr) && 1624 - (rq->curr->nr_cpus_allowed < 2 || 1624 + (tsk_nr_cpus_allowed(rq->curr) < 2 || 1625 1625 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { 1626 1626 push_dl_tasks(rq); 1627 1627 } ··· 1725 1725 1726 1726 if (task_on_rq_queued(p) && rq->curr != p) { 1727 1727 #ifdef CONFIG_SMP 1728 - if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) 1728 + if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded) 1729 1729 queue_push_tasks(rq); 1730 1730 #else 1731 1731 if (dl_task(rq->curr))
+12 -12
kernel/sched/rt.c
··· 334 334 rt_rq = &rq_of_rt_rq(rt_rq)->rt; 335 335 336 336 rt_rq->rt_nr_total++; 337 - if (p->nr_cpus_allowed > 1) 337 + if (tsk_nr_cpus_allowed(p) > 1) 338 338 rt_rq->rt_nr_migratory++; 339 339 340 340 update_rt_migration(rt_rq); ··· 351 351 rt_rq = &rq_of_rt_rq(rt_rq)->rt; 352 352 353 353 rt_rq->rt_nr_total--; 354 - if (p->nr_cpus_allowed > 1) 354 + if (tsk_nr_cpus_allowed(p) > 1) 355 355 rt_rq->rt_nr_migratory--; 356 356 357 357 update_rt_migration(rt_rq); ··· 1324 1324 1325 1325 enqueue_rt_entity(rt_se, flags); 1326 1326 1327 - if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 1327 + if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) 1328 1328 enqueue_pushable_task(rq, p); 1329 1329 } 1330 1330 ··· 1413 1413 * will have to sort it out. 1414 1414 */ 1415 1415 if (curr && unlikely(rt_task(curr)) && 1416 - (curr->nr_cpus_allowed < 2 || 1416 + (tsk_nr_cpus_allowed(curr) < 2 || 1417 1417 curr->prio <= p->prio)) { 1418 1418 int target = find_lowest_rq(p); 1419 1419 ··· 1437 1437 * Current can't be migrated, useless to reschedule, 1438 1438 * let's hope p can move out. 1439 1439 */ 1440 - if (rq->curr->nr_cpus_allowed == 1 || 1440 + if (tsk_nr_cpus_allowed(rq->curr) == 1 || 1441 1441 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) 1442 1442 return; 1443 1443 ··· 1445 1445 * p is migratable, so let's not schedule it and 1446 1446 * see if it is pushed or pulled somewhere else. 
1447 1447 */ 1448 - if (p->nr_cpus_allowed != 1 1448 + if (tsk_nr_cpus_allowed(p) != 1 1449 1449 && cpupri_find(&rq->rd->cpupri, p, NULL)) 1450 1450 return; 1451 1451 ··· 1579 1579 * The previous task needs to be made eligible for pushing 1580 1580 * if it is still active 1581 1581 */ 1582 - if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) 1582 + if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1) 1583 1583 enqueue_pushable_task(rq, p); 1584 1584 } 1585 1585 ··· 1629 1629 if (unlikely(!lowest_mask)) 1630 1630 return -1; 1631 1631 1632 - if (task->nr_cpus_allowed == 1) 1632 + if (tsk_nr_cpus_allowed(task) == 1) 1633 1633 return -1; /* No other targets possible */ 1634 1634 1635 1635 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) ··· 1762 1762 1763 1763 BUG_ON(rq->cpu != task_cpu(p)); 1764 1764 BUG_ON(task_current(rq, p)); 1765 - BUG_ON(p->nr_cpus_allowed <= 1); 1765 + BUG_ON(tsk_nr_cpus_allowed(p) <= 1); 1766 1766 1767 1767 BUG_ON(!task_on_rq_queued(p)); 1768 1768 BUG_ON(!rt_task(p)); ··· 2122 2122 { 2123 2123 if (!task_running(rq, p) && 2124 2124 !test_tsk_need_resched(rq->curr) && 2125 - p->nr_cpus_allowed > 1 && 2125 + tsk_nr_cpus_allowed(p) > 1 && 2126 2126 (dl_task(rq->curr) || rt_task(rq->curr)) && 2127 - (rq->curr->nr_cpus_allowed < 2 || 2127 + (tsk_nr_cpus_allowed(rq->curr) < 2 || 2128 2128 rq->curr->prio <= p->prio)) 2129 2129 push_rt_tasks(rq); 2130 2130 } ··· 2197 2197 */ 2198 2198 if (task_on_rq_queued(p) && rq->curr != p) { 2199 2199 #ifdef CONFIG_SMP 2200 - if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) 2200 + if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded) 2201 2201 queue_push_tasks(rq); 2202 2202 #else 2203 2203 if (p->prio < rq->curr->prio)