Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/mmcid: Move scheduler code out of global header

This is only used in the scheduler core code, so there is no point in
having it in a global header.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Yury Norov (NVIDIA) <yury.norov@gmail.com>
Link: https://patch.msgid.link/20251119172549.321259077@linutronix.de

authored by

Thomas Gleixner and committed by
Peter Zijlstra
b08ef5fc 925b7847

+18 -15
-13
include/linux/mm_types.h
··· 1387 1387 return 2 * cpumask_size(); /* mm_cpus_allowed(), mm_cidmask(). */ 1388 1388 } 1389 1389 1390 - static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) 1391 - { 1392 - struct cpumask *mm_allowed = mm_cpus_allowed(mm); 1393 - 1394 - if (!mm) 1395 - return; 1396 - /* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */ 1397 - guard(raw_spinlock)(&mm->mm_cid.lock); 1398 - cpumask_or(mm_allowed, mm_allowed, cpumask); 1399 - WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, cpumask_weight(mm_allowed)); 1400 - } 1401 1390 #else /* CONFIG_SCHED_MM_CID */ 1402 1391 static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { } 1403 1392 static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; } 1404 1393 static inline void mm_destroy_cid(struct mm_struct *mm) { } 1405 - 1406 1394 static inline unsigned int mm_cid_size(void) 1407 1395 { 1408 1396 return 0; 1409 1397 } 1410 - static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { } 1411 1398 #endif /* CONFIG_SCHED_MM_CID */ 1412 1399 1413 1400 struct mmu_gather;
+18 -2
kernel/sched/core.c
··· 2669 2669 return 0; 2670 2670 } 2671 2671 2672 + static inline void mm_update_cpus_allowed(struct mm_struct *mm, const cpumask_t *affmask); 2673 + 2672 2674 /* 2673 2675 * sched_class::set_cpus_allowed must do the below, but is not required to 2674 2676 * actually call this function. ··· 2730 2728 put_prev_task(rq, p); 2731 2729 2732 2730 p->sched_class->set_cpus_allowed(p, ctx); 2733 - mm_set_cpus_allowed(p->mm, ctx->new_mask); 2731 + mm_update_cpus_allowed(p->mm, ctx->new_mask); 2734 2732 2735 2733 if (queued) 2736 2734 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); ··· 10374 10372 * When a task exits, the MM CID held by the task is not longer required as 10375 10373 * the task cannot return to user space. 10376 10374 */ 10375 + static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) 10376 + { 10377 + struct cpumask *mm_allowed = mm_cpus_allowed(mm); 10378 + 10379 + if (!mm) 10380 + return; 10381 + /* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */ 10382 + guard(raw_spinlock)(&mm->mm_cid.lock); 10383 + cpumask_or(mm_allowed, mm_allowed, affmsk); 10384 + WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, cpumask_weight(mm_allowed)); 10385 + } 10386 + 10377 10387 void sched_mm_cid_exit_signals(struct task_struct *t) 10378 10388 { 10379 10389 struct mm_struct *mm = t->mm; ··· 10425 10411 WARN_ON_ONCE(!t->mm || t->mm_cid.cid != MM_CID_UNSET); 10426 10412 t->mm_cid.active = 1; 10427 10413 } 10428 - #endif /* CONFIG_SCHED_MM_CID */ 10414 + #else /* CONFIG_SCHED_MM_CID */ 10415 + static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { } 10416 + #endif /* !CONFIG_SCHED_MM_CID */ 10429 10417 10430 10418 #ifdef CONFIG_SCHED_CLASS_EXT 10431 10419 void sched_deq_and_put_task(struct task_struct *p, int queue_flags,