Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/core: Provide a pointer to the valid CPU mask

In commit:

4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed() wrapper")

the tsk_nr_cpus_allowed() wrapper was removed. There was not
much difference in !RT but in RT we used this to implement
migrate_disable(). Within a migrate_disable() section the CPU mask is
restricted to single CPU while the "normal" CPU mask remains untouched.

As an alternative implementation Ingo suggested to use:

struct task_struct {
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
};
with
t->cpus_ptr = &t->cpus_mask;

In -RT we then can switch the cpus_ptr to:

t->cpus_ptr = &cpumask_of(task_cpu(p));

in a migration disabled region. The rules are simple:

- Code that 'uses' ->cpus_allowed would use the pointer.
- Code that 'modifies' ->cpus_allowed would use the direct mask.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190423142636.14347-1-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by: Sebastian Andrzej Siewior
Committed by: Ingo Molnar
Commit: 3bd37062 — parent: f2c7c76c

+75 -73
+1 -1
arch/ia64/kernel/mca.c
··· 1831 1831 ti->cpu = cpu; 1832 1832 p->stack = ti; 1833 1833 p->state = TASK_UNINTERRUPTIBLE; 1834 - cpumask_set_cpu(cpu, &p->cpus_allowed); 1834 + cpumask_set_cpu(cpu, &p->cpus_mask); 1835 1835 INIT_LIST_HEAD(&p->tasks); 1836 1836 p->parent = p->real_parent = p->group_leader = p; 1837 1837 INIT_LIST_HEAD(&p->children);
+2 -2
arch/mips/include/asm/switch_to.h
··· 42 42 * inline to try to keep the overhead down. If we have been forced to run on 43 43 * a "CPU" with an FPU because of a previous high level of FP computation, 44 44 * but did not actually use the FPU during the most recent time-slice (CU1 45 - * isn't set), we undo the restriction on cpus_allowed. 45 + * isn't set), we undo the restriction on cpus_mask. 46 46 * 47 47 * We're not calling set_cpus_allowed() here, because we have no need to 48 48 * force prompt migration - we're already switching the current CPU to a ··· 57 57 test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \ 58 58 (!(KSTK_STATUS(prev) & ST0_CU1))) { \ 59 59 clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \ 60 - prev->cpus_allowed = prev->thread.user_cpus_allowed; \ 60 + prev->cpus_mask = prev->thread.user_cpus_allowed; \ 61 61 } \ 62 62 next->thread.emulated_fp = 0; \ 63 63 } while(0)
+1 -1
arch/mips/kernel/mips-mt-fpaff.c
··· 177 177 if (retval) 178 178 goto out_unlock; 179 179 180 - cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); 180 + cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr); 181 181 cpumask_and(&mask, &allowed, cpu_active_mask); 182 182 183 183 out_unlock:
+3 -3
arch/mips/kernel/traps.c
··· 891 891 * restricted the allowed set to exclude any CPUs with FPUs, 892 892 * we'll skip the procedure. 893 893 */ 894 - if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) { 894 + if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) { 895 895 cpumask_t tmask; 896 896 897 897 current->thread.user_cpus_allowed 898 - = current->cpus_allowed; 899 - cpumask_and(&tmask, &current->cpus_allowed, 898 + = current->cpus_mask; 899 + cpumask_and(&tmask, &current->cpus_mask, 900 900 &mt_fpu_cpumask); 901 901 set_cpus_allowed_ptr(current, &tmask); 902 902 set_thread_flag(TIF_FPUBOUND);
+1 -1
arch/powerpc/platforms/cell/spufs/sched.c
··· 128 128 * runqueue. The context will be rescheduled on the proper node 129 129 * if it is timesliced or preempted. 130 130 */ 131 - cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed); 131 + cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); 132 132 133 133 /* Save the current cpu id for spu interrupt routing. */ 134 134 ctx->last_ran = raw_smp_processor_id();
+1 -1
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
··· 1503 1503 * may be scheduled elsewhere and invalidate entries in the 1504 1504 * pseudo-locked region. 1505 1505 */ 1506 - if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) { 1506 + if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { 1507 1507 mutex_unlock(&rdtgroup_mutex); 1508 1508 return -EINVAL; 1509 1509 }
+3 -3
drivers/infiniband/hw/hfi1/affinity.c
··· 1038 1038 struct hfi1_affinity_node *entry; 1039 1039 cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask; 1040 1040 const struct cpumask *node_mask, 1041 - *proc_mask = &current->cpus_allowed; 1041 + *proc_mask = current->cpus_ptr; 1042 1042 struct hfi1_affinity_node_list *affinity = &node_affinity; 1043 1043 struct cpu_mask_set *set = &affinity->proc; 1044 1044 ··· 1046 1046 * check whether process/context affinity has already 1047 1047 * been set 1048 1048 */ 1049 - if (cpumask_weight(proc_mask) == 1) { 1049 + if (current->nr_cpus_allowed == 1) { 1050 1050 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", 1051 1051 current->pid, current->comm, 1052 1052 cpumask_pr_args(proc_mask)); ··· 1057 1057 cpu = cpumask_first(proc_mask); 1058 1058 cpumask_set_cpu(cpu, &set->used); 1059 1059 goto done; 1060 - } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { 1060 + } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) { 1061 1061 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", 1062 1062 current->pid, current->comm, 1063 1063 cpumask_pr_args(proc_mask));
+1 -2
drivers/infiniband/hw/hfi1/sdma.c
··· 855 855 { 856 856 struct sdma_rht_node *rht_node; 857 857 struct sdma_engine *sde = NULL; 858 - const struct cpumask *current_mask = &current->cpus_allowed; 859 858 unsigned long cpu_id; 860 859 861 860 /* 862 861 * To ensure that always the same sdma engine(s) will be 863 862 * selected make sure the process is pinned to this CPU only. 864 863 */ 865 - if (cpumask_weight(current_mask) != 1) 864 + if (current->nr_cpus_allowed != 1) 866 865 goto out; 867 866 868 867 cpu_id = smp_processor_id();
+3 -4
drivers/infiniband/hw/qib/qib_file_ops.c
··· 1142 1142 static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd) 1143 1143 { 1144 1144 struct qib_filedata *fd = fp->private_data; 1145 - const unsigned int weight = cpumask_weight(&current->cpus_allowed); 1145 + const unsigned int weight = current->nr_cpus_allowed; 1146 1146 const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus); 1147 1147 int local_cpu; 1148 1148 ··· 1623 1623 ret = find_free_ctxt(i_minor - 1, fp, uinfo); 1624 1624 else { 1625 1625 int unit; 1626 - const unsigned int cpu = cpumask_first(&current->cpus_allowed); 1627 - const unsigned int weight = 1628 - cpumask_weight(&current->cpus_allowed); 1626 + const unsigned int cpu = cpumask_first(current->cpus_ptr); 1627 + const unsigned int weight = current->nr_cpus_allowed; 1629 1628 1630 1629 if (weight == 1 && !test_bit(cpu, qib_cpulist)) 1631 1630 if (!find_hca(cpu, &unit) && unit >= 0)
+2 -2
fs/proc/array.c
··· 381 381 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) 382 382 { 383 383 seq_printf(m, "Cpus_allowed:\t%*pb\n", 384 - cpumask_pr_args(&task->cpus_allowed)); 384 + cpumask_pr_args(task->cpus_ptr)); 385 385 seq_printf(m, "Cpus_allowed_list:\t%*pbl\n", 386 - cpumask_pr_args(&task->cpus_allowed)); 386 + cpumask_pr_args(task->cpus_ptr)); 387 387 } 388 388 389 389 static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
+3 -2
include/linux/sched.h
··· 651 651 652 652 unsigned int policy; 653 653 int nr_cpus_allowed; 654 - cpumask_t cpus_allowed; 654 + const cpumask_t *cpus_ptr; 655 + cpumask_t cpus_mask; 655 656 656 657 #ifdef CONFIG_PREEMPT_RCU 657 658 int rcu_read_lock_nesting; ··· 1400 1399 #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1401 1400 #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ 1402 1401 #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ 1403 - #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ 1402 + #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ 1404 1403 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1405 1404 #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ 1406 1405 #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+2 -1
init/init_task.c
··· 72 72 .static_prio = MAX_PRIO - 20, 73 73 .normal_prio = MAX_PRIO - 20, 74 74 .policy = SCHED_NORMAL, 75 - .cpus_allowed = CPU_MASK_ALL, 75 + .cpus_ptr = &init_task.cpus_mask, 76 + .cpus_mask = CPU_MASK_ALL, 76 77 .nr_cpus_allowed= NR_CPUS, 77 78 .mm = NULL, 78 79 .active_mm = &init_mm,
+1 -1
kernel/cgroup/cpuset.c
··· 2829 2829 if (task_css_is_root(task, cpuset_cgrp_id)) 2830 2830 return; 2831 2831 2832 - set_cpus_allowed_ptr(task, &current->cpus_allowed); 2832 + set_cpus_allowed_ptr(task, current->cpus_ptr); 2833 2833 task->mems_allowed = current->mems_allowed; 2834 2834 } 2835 2835
+2
kernel/fork.c
··· 894 894 #ifdef CONFIG_STACKPROTECTOR 895 895 tsk->stack_canary = get_random_canary(); 896 896 #endif 897 + if (orig->cpus_ptr == &orig->cpus_mask) 898 + tsk->cpus_ptr = &tsk->cpus_mask; 897 899 898 900 /* 899 901 * One for us, one for whoever does the "release_task()" (usually
+20 -20
kernel/sched/core.c
··· 930 930 */ 931 931 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 932 932 { 933 - if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 933 + if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 934 934 return false; 935 935 936 936 if (is_per_cpu_kthread(p)) ··· 1025 1025 local_irq_disable(); 1026 1026 /* 1027 1027 * We need to explicitly wake pending tasks before running 1028 - * __migrate_task() such that we will not miss enforcing cpus_allowed 1028 + * __migrate_task() such that we will not miss enforcing cpus_ptr 1029 1029 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 1030 1030 */ 1031 1031 sched_ttwu_pending(); ··· 1056 1056 */ 1057 1057 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) 1058 1058 { 1059 - cpumask_copy(&p->cpus_allowed, new_mask); 1059 + cpumask_copy(&p->cpus_mask, new_mask); 1060 1060 p->nr_cpus_allowed = cpumask_weight(new_mask); 1061 1061 } 1062 1062 ··· 1126 1126 goto out; 1127 1127 } 1128 1128 1129 - if (cpumask_equal(&p->cpus_allowed, new_mask)) 1129 + if (cpumask_equal(p->cpus_ptr, new_mask)) 1130 1130 goto out; 1131 1131 1132 1132 if (!cpumask_intersects(new_mask, cpu_valid_mask)) { ··· 1286 1286 if (task_cpu(arg->src_task) != arg->src_cpu) 1287 1287 goto unlock; 1288 1288 1289 - if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed)) 1289 + if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 1290 1290 goto unlock; 1291 1291 1292 - if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed)) 1292 + if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 1293 1293 goto unlock; 1294 1294 1295 1295 __migrate_swap_task(arg->src_task, arg->dst_cpu); ··· 1331 1331 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 1332 1332 goto out; 1333 1333 1334 - if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed)) 1334 + if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 1335 1335 goto out; 1336 1336 1337 - if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed)) 1337 + if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 1338 1338 goto out; 1339 1339 1340 1340 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); ··· 1479 1479 EXPORT_SYMBOL_GPL(kick_process); 1480 1480 1481 1481 /* 1482 - * ->cpus_allowed is protected by both rq->lock and p->pi_lock 1482 + * ->cpus_ptr is protected by both rq->lock and p->pi_lock 1483 1483 * 1484 1484 * A few notes on cpu_active vs cpu_online: 1485 1485 * ··· 1519 1519 for_each_cpu(dest_cpu, nodemask) { 1520 1520 if (!cpu_active(dest_cpu)) 1521 1521 continue; 1522 - if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 1522 + if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) 1523 1523 return dest_cpu; 1524 1524 } 1525 1525 } 1526 1526 1527 1527 for (;;) { 1528 1528 /* Any allowed, online CPU? */ 1529 - for_each_cpu(dest_cpu, &p->cpus_allowed) { 1529 + for_each_cpu(dest_cpu, p->cpus_ptr) { 1530 1530 if (!is_cpu_allowed(p, dest_cpu)) 1531 1531 continue; 1532 1532 ··· 1570 1570 } 1571 1571 1572 1572 /* 1573 - * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. 1573 + * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 1574 1574 */ 1575 1575 static inline 1576 1576 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) ··· 1580 1580 if (p->nr_cpus_allowed > 1) 1581 1581 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 1582 1582 else 1583 - cpu = cpumask_any(&p->cpus_allowed); 1583 + cpu = cpumask_any(p->cpus_ptr); 1584 1584 1585 1585 /* 1586 1586 * In order not to call set_task_cpu() on a blocking task we need 1587 - * to rely on ttwu() to place the task on a valid ->cpus_allowed 1587 + * to rely on ttwu() to place the task on a valid ->cpus_ptr 1588 1588 * CPU. 1589 1589 * 1590 1590 * Since this is common to all placement strategies, this lives here.
··· 2395 2395 #ifdef CONFIG_SMP 2396 2396 /* 2397 2397 * Fork balancing, do it here and not earlier because: 2398 - * - cpus_allowed can change in the fork path 2398 + * - cpus_ptr can change in the fork path 2399 2399 * - any previously selected CPU might disappear through hotplug 2400 2400 * 2401 2401 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, ··· 4267 4267 * the entire root_domain to become SCHED_DEADLINE. We 4268 4268 * will also fail if there's no bandwidth available. 4269 4269 */ 4270 - if (!cpumask_subset(span, &p->cpus_allowed) || 4270 + if (!cpumask_subset(span, p->cpus_ptr) || 4271 4271 rq->rd->dl_bw.bw == 0) { 4272 4272 task_rq_unlock(rq, p, &rf); 4273 4273 return -EPERM; ··· 4866 4866 goto out_unlock; 4867 4867 4868 4868 raw_spin_lock_irqsave(&p->pi_lock, flags); 4869 - cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); 4869 + cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 4870 4870 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4871 4871 4872 4872 out_unlock: ··· 5443 5443 * allowed nodes is unnecessary. Thus, cpusets are not 5444 5444 * applicable for such threads. This prevents checking for 5445 5445 * success of set_cpus_allowed_ptr() on all attached tasks 5446 - * before cpus_allowed may be changed. 5446 + * before cpus_mask may be changed. 5447 5447 */ 5448 5448 if (p->flags & PF_NO_SETAFFINITY) { 5449 5449 ret = -EINVAL; ··· 5470 5470 if (curr_cpu == target_cpu) 5471 5471 return 0; 5472 5472 5473 - if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed)) 5473 + if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 5474 5474 return -EINVAL; 5475 5475 5476 5476 /* TODO: This is not properly updating schedstats */ ··· 5608 5608 put_prev_task(rq, next); 5609 5609 5610 5610 /* 5611 - * Rules for changing task_struct::cpus_allowed are holding 5611 + * Rules for changing task_struct::cpus_mask are holding 5612 5612 * both pi_lock and rq->lock, such that holding either 5613 5613 * stabilizes the mask. 5614 5614 *
+2 -2
kernel/sched/cpudeadline.c
··· 124 124 const struct sched_dl_entity *dl_se = &p->dl; 125 125 126 126 if (later_mask && 127 - cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { 127 + cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) { 128 128 return 1; 129 129 } else { 130 130 int best_cpu = cpudl_maximum(cp); 131 131 132 132 WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); 133 133 134 - if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) && 134 + if (cpumask_test_cpu(best_cpu, p->cpus_ptr) && 135 135 dl_time_before(dl_se->deadline, cp->elements[0].dl)) { 136 136 if (later_mask) 137 137 cpumask_set_cpu(best_cpu, later_mask);
+2 -2
kernel/sched/cpupri.c
··· 98 98 if (skip) 99 99 continue; 100 100 101 - if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) 101 + if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids) 102 102 continue; 103 103 104 104 if (lowest_mask) { 105 - cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); 105 + cpumask_and(lowest_mask, p->cpus_ptr, vec->mask); 106 106 107 107 /* 108 108 * We have to ensure that we have at least one bit
+3 -3
kernel/sched/deadline.c
··· 538 538 * If we cannot preempt any rq, fall back to pick any 539 539 * online CPU: 540 540 */ 541 - cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); 541 + cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); 542 542 if (cpu >= nr_cpu_ids) { 543 543 /* 544 544 * Failed to find any suitable CPU. ··· 1824 1824 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) 1825 1825 { 1826 1826 if (!task_running(rq, p) && 1827 - cpumask_test_cpu(cpu, &p->cpus_allowed)) 1827 + cpumask_test_cpu(cpu, p->cpus_ptr)) 1828 1828 return 1; 1829 1829 return 0; 1830 1830 } ··· 1974 1974 /* Retry if something changed. */ 1975 1975 if (double_lock_balance(rq, later_rq)) { 1976 1976 if (unlikely(task_rq(task) != rq || 1977 - !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) || 1977 + !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) || 1978 1978 task_running(rq, task) || 1979 1979 !dl_task(task) || 1980 1980 !task_on_rq_queued(task))) {
+17 -17
kernel/sched/fair.c
··· 1621 1621 * be incurred if the tasks were swapped. 1622 1622 */ 1623 1623 /* Skip this swap candidate if cannot move to the source cpu */ 1624 - if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) 1624 + if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) 1625 1625 goto unlock; 1626 1626 1627 1627 /* ··· 1718 1718 1719 1719 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { 1720 1720 /* Skip this CPU if the source task cannot migrate */ 1721 - if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed)) 1721 + if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) 1722 1722 continue; 1723 1723 1724 1724 env->dst_cpu = cpu; ··· 5831 5831 5832 5832 /* Skip over this group if it has no CPUs allowed */ 5833 5833 if (!cpumask_intersects(sched_group_span(group), 5834 - &p->cpus_allowed)) 5834 + p->cpus_ptr)) 5835 5835 continue; 5836 5836 5837 5837 local_group = cpumask_test_cpu(this_cpu, ··· 5963 5963 return cpumask_first(sched_group_span(group)); 5964 5964 5965 5965 /* Traverse only the allowed CPUs */ 5966 - for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) { 5966 + for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { 5967 5967 if (available_idle_cpu(i)) { 5968 5968 struct rq *rq = cpu_rq(i); 5969 5969 struct cpuidle_state *idle = idle_get_state(rq); ··· 6003 6003 { 6004 6004 int new_cpu = cpu; 6005 6005 6006 - if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed)) 6006 + if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) 6007 6007 return prev_cpu; 6008 6008 6009 6009 /* ··· 6120 6120 if (!test_idle_cores(target, false)) 6121 6121 return -1; 6122 6122 6123 - cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); 6123 + cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 6124 6124 6125 6125 for_each_cpu_wrap(core, cpus, target) { 6126 6126 bool idle = true; ··· 6154 6154 return -1; 6155 6155 6156 6156 for_each_cpu(cpu, cpu_smt_mask(target)) { 6157 - if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 6157 + if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 6158 6158 continue; 6159 6159 if (available_idle_cpu(cpu)) 6160 6160 return cpu; ··· 6217 6217 for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { 6218 6218 if (!--nr) 6219 6219 return -1; 6220 - if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 6220 + if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 6221 6221 continue; 6222 6222 if (available_idle_cpu(cpu)) 6223 6223 break; ··· 6254 6254 recent_used_cpu != target && 6255 6255 cpus_share_cache(recent_used_cpu, target) && 6256 6256 available_idle_cpu(recent_used_cpu) && 6257 - cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) { 6257 + cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) { 6258 6258 /* 6259 6259 * Replace recent_used_cpu with prev as it is a potential 6260 6260 * candidate for the next wake: ··· 6600 6600 int max_spare_cap_cpu = -1; 6601 6601 6602 6602 for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { 6603 - if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 6603 + if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 6604 6604 continue; 6605 6605 6606 6606 /* Skip CPUs that will be overutilized. */ ··· 6689 6689 } 6690 6690 6691 6691 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) && 6692 - cpumask_test_cpu(cpu, &p->cpus_allowed); 6692 + cpumask_test_cpu(cpu, p->cpus_ptr); 6693 6693 } 6694 6694 6695 6695 rcu_read_lock(); ··· 7445 7445 /* 7446 7446 * We do not migrate tasks that are: 7447 7447 * 1) throttled_lb_pair, or 7448 - * 2) cannot be migrated to this CPU due to cpus_allowed, or 7448 + * 2) cannot be migrated to this CPU due to cpus_ptr, or 7449 7449 * 3) running (obviously), or 7450 7450 * 4) are cache-hot on their current CPU. 7451 7451 */ 7452 7452 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) 7453 7453 return 0; 7454 7454 7455 - if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { 7455 + if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { 7456 7456 int cpu; 7457 7457 7458 7458 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); ··· 7472 7472 7473 7473 /* Prevent to re-select dst_cpu via env's CPUs: */ 7474 7474 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { 7475 - if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { 7475 + if (cpumask_test_cpu(cpu, p->cpus_ptr)) { 7476 7476 env->flags |= LBF_DST_PINNED; 7477 7477 env->new_dst_cpu = cpu; 7478 7478 break; ··· 8099 8099 8100 8100 /* 8101 8101 * Group imbalance indicates (and tries to solve) the problem where balancing 8102 - * groups is inadequate due to ->cpus_allowed constraints. 8102 + * groups is inadequate due to ->cpus_ptr constraints. 8103 8103 * 8104 8104 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a 8105 8105 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. ··· 8768 8768 /* 8769 8769 * If the busiest group is imbalanced the below checks don't 8770 8770 * work because they assume all things are equal, which typically 8771 - * isn't true due to cpus_allowed constraints and the like. 8771 + * isn't true due to cpus_ptr constraints and the like. 8772 8772 */ 8773 8773 if (busiest->group_type == group_imbalanced) 8774 8774 goto force_balance; ··· 9210 9210 * if the curr task on busiest CPU can't be 9211 9211 * moved to this_cpu: 9212 9212 */ 9213 - if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { 9213 + if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { 9214 9214 raw_spin_unlock_irqrestore(&busiest->lock, 9215 9215 flags); 9216 9216 env.flags |= LBF_ALL_PINNED;
+2 -2
kernel/sched/rt.c
··· 1614 1614 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 1615 1615 { 1616 1616 if (!task_running(rq, p) && 1617 - cpumask_test_cpu(cpu, &p->cpus_allowed)) 1617 + cpumask_test_cpu(cpu, p->cpus_ptr)) 1618 1618 return 1; 1619 1619 1620 1620 return 0; ··· 1751 1751 * Also make sure that it wasn't scheduled on its rq. 1752 1752 */ 1753 1753 if (unlikely(task_rq(task) != rq || 1754 - !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) || 1754 + !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) || 1755 1755 task_running(rq, task) || 1756 1756 !rt_task(task) || 1757 1757 !task_on_rq_queued(task))) {
+1 -1
kernel/trace/trace_hwlat.c
··· 277 277 * of this thread, than stop migrating for the duration 278 278 * of the current test. 279 279 */ 280 - if (!cpumask_equal(current_mask, &current->cpus_allowed)) 280 + if (!cpumask_equal(current_mask, current->cpus_ptr)) 281 281 goto disable; 282 282 283 283 get_online_cpus();
+1 -1
lib/smp_processor_id.c
··· 23 23 * Kernel threads bound to a single CPU can safely use 24 24 * smp_processor_id(): 25 25 */ 26 - if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu))) 26 + if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu))) 27 27 goto out; 28 28 29 29 /*
+1 -1
samples/trace_events/trace-events-sample.c
··· 34 34 35 35 /* Silly tracepoints */ 36 36 trace_foo_bar("hello", cnt, array, random_strings[len], 37 - &current->cpus_allowed); 37 + current->cpus_ptr); 38 38 39 39 trace_foo_with_template_simple("HELLO", cnt); 40 40