Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/smp: Make SMP unconditional

Simplify the scheduler by making CONFIG_SMP=y primitives and data
structures unconditional.

Introduce transitory wrappers for functionality not yet converted to SMP.

Note that this patch is pretty large, because there's no clear separation
between various aspects of the SMP scheduler; it's basically a huge block
of #ifdef CONFIG_SMP. A fair amount of it has to be switched on for it to
boot and work on UP systems.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250528080924.2273858-21-mingo@kernel.org

+31 -599
-9
include/linux/preempt.h
··· 369 369 370 370 #endif 371 371 372 - #ifdef CONFIG_SMP 373 - 374 372 /* 375 373 * Migrate-Disable and why it is undesired. 376 374 * ··· 426 428 */ 427 429 extern void migrate_disable(void); 428 430 extern void migrate_enable(void); 429 - 430 - #else 431 - 432 - static inline void migrate_disable(void) { } 433 - static inline void migrate_enable(void) { } 434 - 435 - #endif /* CONFIG_SMP */ 436 431 437 432 /** 438 433 * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
-42
include/linux/sched.h
··· 599 599 unsigned long runnable_weight; 600 600 #endif 601 601 602 - #ifdef CONFIG_SMP 603 602 /* 604 603 * Per entity load average tracking. 605 604 * ··· 606 607 * collide with read-mostly values above. 607 608 */ 608 609 struct sched_avg avg; 609 - #endif 610 610 }; 611 611 612 612 struct sched_rt_entity { ··· 835 837 struct alloc_tag *alloc_tag; 836 838 #endif 837 839 838 - #ifdef CONFIG_SMP 839 840 int on_cpu; 840 841 struct __call_single_node wake_entry; 841 842 unsigned int wakee_flips; ··· 850 853 */ 851 854 int recent_used_cpu; 852 855 int wake_cpu; 853 - #endif 854 856 int on_rq; 855 857 856 858 int prio; ··· 908 912 cpumask_t *user_cpus_ptr; 909 913 cpumask_t cpus_mask; 910 914 void *migration_pending; 911 - #ifdef CONFIG_SMP 912 915 unsigned short migration_disabled; 913 - #endif 914 916 unsigned short migration_flags; 915 917 916 918 #ifdef CONFIG_PREEMPT_RCU ··· 940 946 struct sched_info sched_info; 941 947 942 948 struct list_head tasks; 943 - #ifdef CONFIG_SMP 944 949 struct plist_node pushable_tasks; 945 950 struct rb_node pushable_dl_tasks; 946 - #endif 947 951 948 952 struct mm_struct *mm; 949 953 struct mm_struct *active_mm; ··· 1835 1843 extern int task_can_attach(struct task_struct *p); 1836 1844 extern int dl_bw_alloc(int cpu, u64 dl_bw); 1837 1845 extern void dl_bw_free(int cpu, u64 dl_bw); 1838 - #ifdef CONFIG_SMP 1839 1846 1840 1847 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */ 1841 1848 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); ··· 1852 1861 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); 1853 1862 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); 1854 1863 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); 1855 - #else 1856 - static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 1857 - { 1858 - } 1859 - static inline int 
set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 1860 - { 1861 - /* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask.h */ 1862 - if ((*cpumask_bits(new_mask) & 1) == 0) 1863 - return -EINVAL; 1864 - return 0; 1865 - } 1866 - static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) 1867 - { 1868 - if (src->user_cpus_ptr) 1869 - return -EINVAL; 1870 - return 0; 1871 - } 1872 - static inline void release_user_cpus_ptr(struct task_struct *p) 1873 - { 1874 - WARN_ON(p->user_cpus_ptr); 1875 - } 1876 - 1877 - static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 1878 - { 1879 - return 0; 1880 - } 1881 - #endif 1882 1864 1883 1865 extern int yield_to(struct task_struct *p, bool preempt); 1884 1866 extern void set_user_nice(struct task_struct *p, long nice); ··· 1940 1976 extern int wake_up_process(struct task_struct *tsk); 1941 1977 extern void wake_up_new_task(struct task_struct *tsk); 1942 1978 1943 - #ifdef CONFIG_SMP 1944 1979 extern void kick_process(struct task_struct *tsk); 1945 - #else 1946 - static inline void kick_process(struct task_struct *tsk) { } 1947 - #endif 1948 1980 1949 1981 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); 1950 1982 #define set_task_comm(tsk, from) ({ \ ··· 2185 2225 #define TASK_SIZE_OF(tsk) TASK_SIZE 2186 2226 #endif 2187 2227 2188 - #ifdef CONFIG_SMP 2189 2228 static inline bool owner_on_cpu(struct task_struct *owner) 2190 2229 { 2191 2230 /* ··· 2196 2237 2197 2238 /* Returns effective CPU energy utilization, as seen by the scheduler */ 2198 2239 unsigned long sched_cpu_util(int cpu); 2199 - #endif /* CONFIG_SMP */ 2200 2240 2201 2241 #ifdef CONFIG_SCHED_CORE 2202 2242 extern void sched_core_free(struct task_struct *tsk);
-4
include/linux/sched/deadline.h
··· 29 29 return (s64)(a - b) < 0; 30 30 } 31 31 32 - #ifdef CONFIG_SMP 33 - 34 32 struct root_domain; 35 33 extern void dl_add_task_root_domain(struct task_struct *p); 36 34 extern void dl_clear_root_domain(struct root_domain *rd); 37 35 extern void dl_clear_root_domain_cpu(int cpu); 38 - 39 - #endif /* CONFIG_SMP */ 40 36 41 37 extern u64 dl_cookie; 42 38 extern bool dl_bw_visited(int cpu, u64 cookie);
-4
include/linux/sched/idle.h
··· 11 11 CPU_MAX_IDLE_TYPES 12 12 }; 13 13 14 - #ifdef CONFIG_SMP 15 14 extern void wake_up_if_idle(int cpu); 16 - #else 17 - static inline void wake_up_if_idle(int cpu) { } 18 - #endif 19 15 20 16 /* 21 17 * Idle thread specific functions to determine the need_resched
+2 -2
include/linux/sched/nohz.h
··· 6 6 * This is the interface between the scheduler and nohz/dynticks: 7 7 */ 8 8 9 - #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 9 + #ifdef CONFIG_NO_HZ_COMMON 10 10 extern void nohz_balance_enter_idle(int cpu); 11 11 extern int get_nohz_timer_target(void); 12 12 #else ··· 23 23 static inline void calc_load_nohz_stop(void) { } 24 24 #endif /* CONFIG_NO_HZ_COMMON */ 25 25 26 - #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) 26 + #ifdef CONFIG_NO_HZ_COMMON 27 27 extern void wake_up_nohz_cpu(int cpu); 28 28 #else 29 29 static inline void wake_up_nohz_cpu(int cpu) { }
-32
include/linux/sched/topology.h
··· 9 9 /* 10 10 * sched-domains (multiprocessor balancing) declarations: 11 11 */ 12 - #ifdef CONFIG_SMP 13 12 14 13 /* Generate SD flag indexes */ 15 14 #define SD_FLAG(name, mflags) __##name, ··· 198 199 199 200 200 201 # define SD_INIT_NAME(type) .name = #type 201 - 202 - #else /* CONFIG_SMP */ 203 - 204 - struct sched_domain_attr; 205 - 206 - static inline void 207 - partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 208 - struct sched_domain_attr *dattr_new) 209 - { 210 - } 211 - 212 - static inline bool cpus_equal_capacity(int this_cpu, int that_cpu) 213 - { 214 - return true; 215 - } 216 - 217 - static inline bool cpus_share_cache(int this_cpu, int that_cpu) 218 - { 219 - return true; 220 - } 221 - 222 - static inline bool cpus_share_resources(int this_cpu, int that_cpu) 223 - { 224 - return true; 225 - } 226 - 227 - static inline void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio) 228 - { 229 - } 230 - 231 - #endif /* !CONFIG_SMP */ 232 202 233 203 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 234 204 extern void rebuild_sched_domains_energy(void);
+2 -4
kernel/sched/build_policy.c
··· 50 50 #include "idle.c" 51 51 52 52 #include "rt.c" 53 + #include "cpudeadline.c" 53 54 54 - #ifdef CONFIG_SMP 55 - # include "cpudeadline.c" 56 - # include "pelt.c" 57 - #endif 55 + #include "pelt.c" 58 56 59 57 #include "cputime.c" 60 58 #include "deadline.c"
+2 -4
kernel/sched/build_utility.c
··· 80 80 #include "wait_bit.c" 81 81 #include "wait.c" 82 82 83 - #ifdef CONFIG_SMP 84 - # include "cpupri.c" 85 - # include "stop_task.c" 86 - #endif 83 + #include "cpupri.c" 84 + #include "stop_task.c" 87 85 88 86 #include "topology.c" 89 87
+15 -91
kernel/sched/core.c
··· 650 650 raw_spin_unlock(rq_lockp(rq)); 651 651 } 652 652 653 - #ifdef CONFIG_SMP 654 653 /* 655 654 * double_rq_lock - safely lock two runqueues 656 655 */ ··· 666 667 667 668 double_rq_clock_clear_update(rq1, rq2); 668 669 } 669 - #endif /* CONFIG_SMP */ 670 670 671 671 /* 672 672 * __task_rq_lock - lock the rq @p resides on. ··· 947 949 _val; \ 948 950 }) 949 951 950 - #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) 952 + #ifdef TIF_POLLING_NRFLAG 951 953 /* 952 954 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, 953 955 * this avoids any races wrt polling state changes and thereby avoids ··· 986 988 return true; 987 989 } 988 990 989 - #ifdef CONFIG_SMP 990 991 static inline bool set_nr_if_polling(struct task_struct *p) 991 992 { 992 993 return false; 993 994 } 994 - #endif 995 995 #endif 996 996 997 997 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) ··· 1163 1167 raw_spin_rq_unlock_irqrestore(rq, flags); 1164 1168 } 1165 1169 1166 - #ifdef CONFIG_SMP 1167 1170 #ifdef CONFIG_NO_HZ_COMMON 1168 1171 /* 1169 1172 * In the semi idle case, use the nearest busy CPU for migrating timers ··· 1369 1374 return true; 1370 1375 } 1371 1376 #endif /* CONFIG_NO_HZ_FULL */ 1372 - #endif /* CONFIG_SMP */ 1373 1377 1374 - #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 1375 - (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) 1378 + #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED) 1376 1379 /* 1377 1380 * Iterate task_group tree rooted at *from, calling @down when first entering a 1378 1381 * node and @up when leaving it for the final time. 
··· 2346 2353 return ncsw; 2347 2354 } 2348 2355 2349 - #ifdef CONFIG_SMP 2350 - 2351 2356 static void 2352 2357 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx); 2353 2358 ··· 3296 3305 WARN_ON_ONCE(ret); 3297 3306 } 3298 3307 3308 + #ifdef CONFIG_SMP 3309 + 3299 3310 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 3300 3311 { 3301 3312 unsigned int state = READ_ONCE(p->__state); ··· 3351 3358 3352 3359 __set_task_cpu(p, new_cpu); 3353 3360 } 3361 + #endif /* CONFIG_SMP */ 3354 3362 3355 3363 #ifdef CONFIG_NUMA_BALANCING 3356 3364 static void __migrate_swap_task(struct task_struct *p, int cpu) ··· 3655 3661 } 3656 3662 } 3657 3663 3658 - #else /* !CONFIG_SMP: */ 3659 - 3660 - static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } 3661 - 3662 - static inline bool rq_has_pinned_tasks(struct rq *rq) 3663 - { 3664 - return false; 3665 - } 3666 - 3667 - #endif /* !CONFIG_SMP */ 3668 - 3669 3664 static void 3670 3665 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 3671 3666 { ··· 3665 3682 3666 3683 rq = this_rq(); 3667 3684 3668 - #ifdef CONFIG_SMP 3669 3685 if (cpu == rq->cpu) { 3670 3686 __schedstat_inc(rq->ttwu_local); 3671 3687 __schedstat_inc(p->stats.nr_wakeups_local); ··· 3684 3702 3685 3703 if (wake_flags & WF_MIGRATED) 3686 3704 __schedstat_inc(p->stats.nr_wakeups_migrate); 3687 - #endif /* CONFIG_SMP */ 3688 3705 3689 3706 __schedstat_inc(rq->ttwu_count); 3690 3707 __schedstat_inc(p->stats.nr_wakeups); ··· 3712 3731 if (p->sched_contributes_to_load) 3713 3732 rq->nr_uninterruptible--; 3714 3733 3715 - #ifdef CONFIG_SMP 3716 3734 if (wake_flags & WF_RQ_SELECTED) 3717 3735 en_flags |= ENQUEUE_RQ_SELECTED; 3718 3736 if (wake_flags & WF_MIGRATED) 3719 3737 en_flags |= ENQUEUE_MIGRATED; 3720 3738 else 3721 - #endif 3722 3739 if (p->in_iowait) { 3723 3740 delayacct_blkio_end(p); 3724 3741 atomic_dec(&task_rq(p)->nr_iowait); ··· 3727 3748 3728 3749 ttwu_do_wakeup(p); 3729 3750 3730 - 
#ifdef CONFIG_SMP 3731 3751 if (p->sched_class->task_woken) { 3732 3752 /* 3733 3753 * Our task @p is fully woken up and running; so it's safe to ··· 3748 3770 3749 3771 rq->idle_stamp = 0; 3750 3772 } 3751 - #endif /* CONFIG_SMP */ 3752 3773 } 3753 3774 3754 3775 /* ··· 3801 3824 return ret; 3802 3825 } 3803 3826 3804 - #ifdef CONFIG_SMP 3805 3827 void sched_ttwu_pending(void *arg) 3806 3828 { 3807 3829 struct llist_node *llist = arg; ··· 3867 3891 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 3868 3892 3869 3893 WRITE_ONCE(rq->ttwu_pending, 1); 3894 + #ifdef CONFIG_SMP 3870 3895 __smp_call_single_queue(cpu, &p->wake_entry.llist); 3896 + #endif 3871 3897 } 3872 3898 3873 3899 void wake_up_if_idle(int cpu) ··· 3969 3991 3970 3992 return false; 3971 3993 } 3972 - 3973 - #else /* !CONFIG_SMP: */ 3974 - 3975 - static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3976 - { 3977 - return false; 3978 - } 3979 - 3980 - #endif /* !CONFIG_SMP */ 3981 3994 3982 3995 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 3983 3996 { ··· 4502 4533 p->capture_control = NULL; 4503 4534 #endif 4504 4535 init_numa_balancing(clone_flags, p); 4505 - #ifdef CONFIG_SMP 4506 4536 p->wake_entry.u_flags = CSD_TYPE_TTWU; 4507 4537 p->migration_pending = NULL; 4508 - #endif 4509 4538 init_sched_mm_cid(p); 4510 4539 } 4511 4540 ··· 4754 4787 if (likely(sched_info_on())) 4755 4788 memset(&p->sched_info, 0, sizeof(p->sched_info)); 4756 4789 #endif 4757 - #ifdef CONFIG_SMP 4758 4790 p->on_cpu = 0; 4759 - #endif 4760 4791 init_task_preempt_count(p); 4761 - #ifdef CONFIG_SMP 4762 4792 plist_node_init(&p->pushable_tasks, MAX_PRIO); 4763 4793 RB_CLEAR_NODE(&p->pushable_dl_tasks); 4764 - #endif 4794 + 4765 4795 return 0; 4766 4796 } 4767 4797 ··· 4835 4871 4836 4872 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4837 4873 WRITE_ONCE(p->__state, TASK_RUNNING); 4838 - #ifdef CONFIG_SMP 4839 4874 /* 4840 4875 * Fork balancing, do it here and not 
earlier because: 4841 4876 * - cpus_ptr can change in the fork path ··· 4846 4883 p->recent_used_cpu = task_cpu(p); 4847 4884 rseq_migrate(p); 4848 4885 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags)); 4849 - #endif 4850 4886 rq = __task_rq_lock(p, &rf); 4851 4887 update_rq_clock(rq); 4852 4888 post_init_entity_util_avg(p); ··· 4956 4994 4957 4995 static inline void prepare_task(struct task_struct *next) 4958 4996 { 4959 - #ifdef CONFIG_SMP 4960 4997 /* 4961 4998 * Claim the task as running, we do this before switching to it 4962 4999 * such that any running task will have this set. ··· 4964 5003 * its ordering comment. 4965 5004 */ 4966 5005 WRITE_ONCE(next->on_cpu, 1); 4967 - #endif 4968 5006 } 4969 5007 4970 5008 static inline void finish_task(struct task_struct *prev) 4971 5009 { 4972 - #ifdef CONFIG_SMP 4973 5010 /* 4974 5011 * This must be the very last reference to @prev from this CPU. After 4975 5012 * p->on_cpu is cleared, the task can be moved to a different CPU. We ··· 4980 5021 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 4981 5022 */ 4982 5023 smp_store_release(&prev->on_cpu, 0); 4983 - #endif 4984 5024 } 4985 - 4986 - #ifdef CONFIG_SMP 4987 5025 4988 5026 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head) 4989 5027 { ··· 5062 5106 raw_spin_rq_unlock_irqrestore(rq, flags); 5063 5107 } 5064 5108 } 5065 - 5066 - #else /* !CONFIG_SMP: */ 5067 - 5068 - static inline void __balance_callbacks(struct rq *rq) 5069 - { 5070 - } 5071 - 5072 - #endif /* !CONFIG_SMP */ 5073 5109 5074 5110 static inline void 5075 5111 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) ··· 5511 5563 struct rq *rq; 5512 5564 u64 ns; 5513 5565 5514 - #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 5566 + #ifdef CONFIG_64BIT 5515 5567 /* 5516 5568 * 64-bit doesn't need locks to atomically read a 64-bit value. 5517 5569 * So we have a optimization chance when the task's delta_exec is 0. 
··· 5638 5690 if (donor->flags & PF_WQ_WORKER) 5639 5691 wq_worker_tick(donor); 5640 5692 5641 - #ifdef CONFIG_SMP 5642 5693 if (!scx_switched_all()) { 5643 5694 rq->idle_balance = idle_cpu(cpu); 5644 5695 sched_balance_trigger(rq); 5645 5696 } 5646 - #endif 5647 5697 } 5648 5698 5649 5699 #ifdef CONFIG_NO_HZ_FULL ··· 7765 7819 */ 7766 7820 void __init init_idle(struct task_struct *idle, int cpu) 7767 7821 { 7768 - #ifdef CONFIG_SMP 7769 7822 struct affinity_context ac = (struct affinity_context) { 7770 7823 .new_mask = cpumask_of(cpu), 7771 7824 .flags = 0, 7772 7825 }; 7773 - #endif 7774 7826 struct rq *rq = cpu_rq(cpu); 7775 7827 unsigned long flags; 7776 7828 ··· 7784 7840 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; 7785 7841 kthread_set_per_cpu(idle, cpu); 7786 7842 7787 - #ifdef CONFIG_SMP 7788 7843 /* 7789 7844 * No validation and serialization required at boot time and for 7790 7845 * setting up the idle tasks of not yet online CPUs. 7791 7846 */ 7792 7847 set_cpus_allowed_common(idle, &ac); 7793 - #endif 7794 7848 /* 7795 7849 * We're having a chicken and egg problem, even though we are 7796 7850 * holding rq->lock, the CPU isn't yet set to this CPU so the ··· 7807 7865 rq_set_donor(rq, idle); 7808 7866 rcu_assign_pointer(rq->curr, idle); 7809 7867 idle->on_rq = TASK_ON_RQ_QUEUED; 7810 - #ifdef CONFIG_SMP 7811 7868 idle->on_cpu = 1; 7812 - #endif 7813 7869 raw_spin_rq_unlock(rq); 7814 7870 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 7815 7871 ··· 7820 7880 idle->sched_class = &idle_sched_class; 7821 7881 ftrace_graph_init_idle_task(idle, cpu); 7822 7882 vtime_init_idle(idle, cpu); 7823 - #ifdef CONFIG_SMP 7824 7883 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 7825 - #endif 7826 7884 } 7827 - 7828 - #ifdef CONFIG_SMP 7829 7885 7830 7886 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 7831 7887 const struct cpumask *trial) ··· 8416 8480 } 8417 8481 early_initcall(migration_init); 8418 8482 8419 - #else /* !CONFIG_SMP: */ 8420 - void 
__init sched_init_smp(void) 8421 - { 8422 - sched_init_granularity(); 8423 - } 8424 - #endif /* !CONFIG_SMP */ 8425 - 8426 8483 int in_sched_functions(unsigned long addr) 8427 8484 { 8428 8485 return in_lock_functions(addr) || ··· 8441 8512 int i; 8442 8513 8443 8514 /* Make sure the linker didn't screw up */ 8444 - #ifdef CONFIG_SMP 8445 8515 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class)); 8446 - #endif 8447 8516 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class)); 8448 8517 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class)); 8449 8518 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class)); ··· 8484 8557 #endif /* CONFIG_RT_GROUP_SCHED */ 8485 8558 } 8486 8559 8487 - #ifdef CONFIG_SMP 8488 8560 init_defrootdomain(); 8489 - #endif 8490 8561 8491 8562 #ifdef CONFIG_RT_GROUP_SCHED 8492 8563 init_rt_bandwidth(&root_task_group.rt_bandwidth, ··· 8545 8620 rq->rt.rt_runtime = global_rt_runtime(); 8546 8621 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 8547 8622 #endif 8548 - #ifdef CONFIG_SMP 8549 8623 rq->sd = NULL; 8550 8624 rq->rd = NULL; 8551 8625 rq->cpu_capacity = SCHED_CAPACITY_SCALE; ··· 8561 8637 INIT_LIST_HEAD(&rq->cfs_tasks); 8562 8638 8563 8639 rq_attach_root(rq, &def_root_domain); 8564 - # ifdef CONFIG_NO_HZ_COMMON 8640 + #ifdef CONFIG_NO_HZ_COMMON 8565 8641 rq->last_blocked_load_update_tick = jiffies; 8566 8642 atomic_set(&rq->nohz_flags, 0); 8567 8643 8568 8644 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); 8569 - # endif 8570 - # ifdef CONFIG_HOTPLUG_CPU 8645 + #endif 8646 + #ifdef CONFIG_HOTPLUG_CPU 8571 8647 rcuwait_init(&rq->hotplug_wait); 8572 - # endif 8573 - #endif /* CONFIG_SMP */ 8648 + #endif 8574 8649 hrtick_rq_init(rq); 8575 8650 atomic_set(&rq->nr_iowait, 0); 8576 8651 fair_server_init(rq); ··· 8619 8696 8620 8697 #ifdef CONFIG_SMP 8621 8698 idle_thread_set_boot_cpu(); 8622 - balance_push_set(smp_processor_id(), false); 8623 8699 #endif 8700 + 8701 + balance_push_set(smp_processor_id(), 
false); 8624 8702 init_sched_fair_class(); 8625 8703 init_sched_ext_class(); 8626 8704
-2
kernel/sched/cpudeadline.h
··· 17 17 struct cpudl_item *elements; 18 18 }; 19 19 20 - #ifdef CONFIG_SMP 21 20 int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask); 22 21 void cpudl_set(struct cpudl *cp, int cpu, u64 dl); 23 22 void cpudl_clear(struct cpudl *cp, int cpu); ··· 24 25 void cpudl_set_freecpu(struct cpudl *cp, int cpu); 25 26 void cpudl_clear_freecpu(struct cpudl *cp, int cpu); 26 27 void cpudl_cleanup(struct cpudl *cp); 27 - #endif /* CONFIG_SMP */
-2
kernel/sched/cpupri.h
··· 20 20 int *cpu_to_pri; 21 21 }; 22 22 23 - #ifdef CONFIG_SMP 24 23 int cpupri_find(struct cpupri *cp, struct task_struct *p, 25 24 struct cpumask *lowest_mask); 26 25 int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p, ··· 28 29 void cpupri_set(struct cpupri *cp, int cpu, int pri); 29 30 int cpupri_init(struct cpupri *cp); 30 31 void cpupri_cleanup(struct cpupri *cp); 31 - #endif /* CONFIG_SMP */
-95
kernel/sched/deadline.c
··· 115 115 } 116 116 #endif /* !CONFIG_RT_MUTEXES */ 117 117 118 - #ifdef CONFIG_SMP 119 118 static inline struct dl_bw *dl_bw_of(int i) 120 119 { 121 120 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), ··· 194 195 rq->dl.extra_bw += bw; 195 196 } 196 197 } 197 - #else /* !CONFIG_SMP: */ 198 - static inline struct dl_bw *dl_bw_of(int i) 199 - { 200 - return &cpu_rq(i)->dl.dl_bw; 201 - } 202 - 203 - static inline int dl_bw_cpus(int i) 204 - { 205 - return 1; 206 - } 207 - 208 - static inline unsigned long dl_bw_capacity(int i) 209 - { 210 - return SCHED_CAPACITY_SCALE; 211 - } 212 - 213 - bool dl_bw_visited(int cpu, u64 cookie) 214 - { 215 - return false; 216 - } 217 - 218 - static inline 219 - void __dl_update(struct dl_bw *dl_b, s64 bw) 220 - { 221 - struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 222 - 223 - dl->extra_bw += bw; 224 - } 225 - #endif /* !CONFIG_SMP */ 226 198 227 199 static inline 228 200 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) ··· 526 556 { 527 557 dl_rq->root = RB_ROOT_CACHED; 528 558 529 - #ifdef CONFIG_SMP 530 559 /* zero means no -deadline tasks */ 531 560 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; 532 561 533 562 dl_rq->overloaded = 0; 534 563 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED; 535 - #else 536 - init_dl_bw(&dl_rq->dl_bw); 537 - #endif 538 564 539 565 dl_rq->running_bw = 0; 540 566 dl_rq->this_bw = 0; 541 567 init_dl_rq_bw_ratio(dl_rq); 542 568 } 543 - 544 - #ifdef CONFIG_SMP 545 569 546 570 static inline int dl_overloaded(struct rq *rq) 547 571 { ··· 720 756 721 757 return later_rq; 722 758 } 723 - 724 - #else /* !CONFIG_SMP: */ 725 - 726 - static inline 727 - void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 728 - { 729 - } 730 - 731 - static inline 732 - void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 733 - { 734 - } 735 - 736 - static inline 737 - void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 738 - { 739 - } 740 - 741 - 
static inline 742 - void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 743 - { 744 - } 745 - 746 - static inline void deadline_queue_push_tasks(struct rq *rq) 747 - { 748 - } 749 - 750 - static inline void deadline_queue_pull_task(struct rq *rq) 751 - { 752 - } 753 - #endif /* !CONFIG_SMP */ 754 759 755 760 static void 756 761 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags); ··· 1132 1199 1133 1200 static void __push_dl_task(struct rq *rq, struct rq_flags *rf) 1134 1201 { 1135 - #ifdef CONFIG_SMP 1136 1202 /* 1137 1203 * Queueing this task back might have overloaded rq, check if we need 1138 1204 * to kick someone away. ··· 1145 1213 push_dl_task(rq); 1146 1214 rq_repin_lock(rq, rf); 1147 1215 } 1148 - #endif /* CONFIG_SMP */ 1149 1216 } 1150 1217 1151 1218 /* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */ ··· 1274 1343 goto unlock; 1275 1344 } 1276 1345 1277 - #ifdef CONFIG_SMP 1278 1346 if (unlikely(!rq->online)) { 1279 1347 /* 1280 1348 * If the runqueue is no longer available, migrate the ··· 1290 1360 * there. 
1291 1361 */ 1292 1362 } 1293 - #endif /* CONFIG_SMP */ 1294 1363 1295 1364 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 1296 1365 if (dl_task(rq->donor)) ··· 1777 1848 #define __node_2_dle(node) \ 1778 1849 rb_entry((node), struct sched_dl_entity, rb_node) 1779 1850 1780 - #ifdef CONFIG_SMP 1781 - 1782 1851 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) 1783 1852 { 1784 1853 struct rq *rq = rq_of_dl_rq(dl_rq); ··· 1811 1884 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline); 1812 1885 } 1813 1886 } 1814 - 1815 - #else /* !CONFIG_SMP: */ 1816 - 1817 - static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1818 - static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1819 - 1820 - #endif /* !CONFIG_SMP */ 1821 1887 1822 1888 static inline 1823 1889 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) ··· 2138 2218 rq_clock_skip_update(rq); 2139 2219 } 2140 2220 2141 - #ifdef CONFIG_SMP 2142 - 2143 2221 static inline bool dl_task_is_earliest_deadline(struct task_struct *p, 2144 2222 struct rq *rq) 2145 2223 { ··· 2267 2349 2268 2350 return sched_stop_runnable(rq) || sched_dl_runnable(rq); 2269 2351 } 2270 - #endif /* CONFIG_SMP */ 2271 2352 2272 2353 /* 2273 2354 * Only called when both the current and waking task are -deadline ··· 2280 2363 return; 2281 2364 } 2282 2365 2283 - #ifdef CONFIG_SMP 2284 2366 /* 2285 2367 * In the unlikely case current and p have the same deadline 2286 2368 * let us try to decide what's the best thing to do... 
··· 2287 2371 if ((p->dl.deadline == rq->donor->dl.deadline) && 2288 2372 !test_tsk_need_resched(rq->curr)) 2289 2373 check_preempt_equal_dl(rq, p); 2290 - #endif /* CONFIG_SMP */ 2291 2374 } 2292 2375 2293 2376 #ifdef CONFIG_SCHED_HRTICK ··· 2418 2503 * sched_fork() 2419 2504 */ 2420 2505 } 2421 - 2422 - #ifdef CONFIG_SMP 2423 2506 2424 2507 /* Only try algorithms three times */ 2425 2508 #define DL_MAX_TRIES 3 ··· 2912 2999 dl_clear_root_domain(cpu_rq(cpu)->rd); 2913 3000 } 2914 3001 2915 - #endif /* CONFIG_SMP */ 2916 - 2917 3002 static void switched_from_dl(struct rq *rq, struct task_struct *p) 2918 3003 { 2919 3004 /* ··· 2984 3073 } 2985 3074 2986 3075 if (rq->donor != p) { 2987 - #ifdef CONFIG_SMP 2988 3076 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) 2989 3077 deadline_queue_push_tasks(rq); 2990 - #endif 2991 3078 if (dl_task(rq->donor)) 2992 3079 wakeup_preempt_dl(rq, p, 0); 2993 3080 else ··· 3062 3153 .put_prev_task = put_prev_task_dl, 3063 3154 .set_next_task = set_next_task_dl, 3064 3155 3065 - #ifdef CONFIG_SMP 3066 3156 .balance = balance_dl, 3067 3157 .select_task_rq = select_task_rq_dl, 3068 3158 .migrate_task_rq = migrate_task_rq_dl, ··· 3070 3162 .rq_offline = rq_offline_dl, 3071 3163 .task_woken = task_woken_dl, 3072 3164 .find_lock_rq = find_lock_later_rq, 3073 - #endif /* CONFIG_SMP */ 3074 3165 3075 3166 .task_tick = task_tick_dl, 3076 3167 .task_fork = task_fork_dl, ··· 3369 3462 return false; 3370 3463 } 3371 3464 3372 - #ifdef CONFIG_SMP 3373 3465 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, 3374 3466 const struct cpumask *trial) 3375 3467 { ··· 3480 3574 { 3481 3575 dl_bw_manage(dl_bw_req_free, cpu, dl_bw); 3482 3576 } 3483 - #endif /* CONFIG_SMP */ 3484 3577 3485 3578 void print_dl_stats(struct seq_file *m, int cpu) 3486 3579 {
-12
kernel/sched/debug.c
··· 286 286 287 287 __read_mostly bool sched_debug_verbose; 288 288 289 - #ifdef CONFIG_SMP 290 289 static struct dentry *sd_dentry; 291 290 292 291 ··· 313 314 314 315 return result; 315 316 } 316 - #else /* !CONFIG_SMP: */ 317 - # define sched_verbose_write debugfs_write_file_bool 318 - #endif /* !CONFIG_SMP */ 319 317 320 318 static const struct file_operations sched_verbose_fops = { 321 319 .read = debugfs_read_file_bool, ··· 539 543 } 540 544 late_initcall(sched_init_debug); 541 545 542 - #ifdef CONFIG_SMP 543 - 544 546 static cpumask_var_t sd_sysctl_cpus; 545 547 546 548 static int sd_flags_show(struct seq_file *m, void *v) ··· 648 654 if (cpumask_available(sd_sysctl_cpus)) 649 655 __cpumask_set_cpu(cpu, sd_sysctl_cpus); 650 656 } 651 - 652 - #endif /* CONFIG_SMP */ 653 657 654 658 #ifdef CONFIG_FAIR_GROUP_SCHED 655 659 static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg) ··· 924 932 SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x)) 925 933 926 934 PU(dl_nr_running); 927 - #ifdef CONFIG_SMP 928 935 dl_bw = &cpu_rq(cpu)->rd->dl_bw; 929 - #else 930 - dl_bw = &dl_rq->dl_bw; 931 - #endif 932 936 SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw); 933 937 SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw); 934 938
-115
kernel/sched/fair.c
··· 88 88 } 89 89 __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift); 90 90 91 - #ifdef CONFIG_SMP 92 91 /* 93 92 * For asym packing, by default the lower numbered CPU has higher priority. 94 93 */ ··· 110 111 * (default: ~5%) 111 112 */ 112 113 #define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078) 113 - #endif /* CONFIG_SMP */ 114 114 115 115 #ifdef CONFIG_CFS_BANDWIDTH 116 116 /* ··· 994 996 /************************************************************** 995 997 * Scheduling class statistics methods: 996 998 */ 997 - #ifdef CONFIG_SMP 998 999 int sched_update_scaling(void) 999 1000 { 1000 1001 unsigned int factor = get_update_sysctl_factor(); ··· 1005 1008 1006 1009 return 0; 1007 1010 } 1008 - #endif /* CONFIG_SMP */ 1009 1011 1010 1012 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se); 1011 1013 ··· 1037 1041 } 1038 1042 1039 1043 #include "pelt.h" 1040 - 1041 - #ifdef CONFIG_SMP 1042 1044 1043 1045 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu); 1044 1046 static unsigned long task_h_load(struct task_struct *p); ··· 1125 1131 1126 1132 sa->runnable_avg = sa->util_avg; 1127 1133 } 1128 - 1129 - #else /* !CONFIG_SMP: */ 1130 - void init_entity_runnable_average(struct sched_entity *se) 1131 - { 1132 - } 1133 - void post_init_entity_util_avg(struct task_struct *p) 1134 - { 1135 - } 1136 - static void update_tg_load_avg(struct cfs_rq *cfs_rq) 1137 - { 1138 - } 1139 - #endif /* !CONFIG_SMP */ 1140 1134 1141 1135 static s64 update_curr_se(struct rq *rq, struct sched_entity *curr) 1142 1136 { ··· 3680 3698 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) 3681 3699 { 3682 3700 update_load_add(&cfs_rq->load, se->load.weight); 3683 - #ifdef CONFIG_SMP 3684 3701 if (entity_is_task(se)) { 3685 3702 struct rq *rq = rq_of(cfs_rq); 3686 3703 3687 3704 account_numa_enqueue(rq, task_of(se)); 3688 3705 list_add(&se->group_node, &rq->cfs_tasks); 3689 3706 } 3690 - #endif 
3691 3707 cfs_rq->nr_queued++; 3692 3708 } 3693 3709 ··· 3693 3713 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) 3694 3714 { 3695 3715 update_load_sub(&cfs_rq->load, se->load.weight); 3696 - #ifdef CONFIG_SMP 3697 3716 if (entity_is_task(se)) { 3698 3717 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); 3699 3718 list_del_init(&se->group_node); 3700 3719 } 3701 - #endif 3702 3720 cfs_rq->nr_queued--; 3703 3721 } 3704 3722 ··· 3748 3770 *ptr -= min_t(typeof(*ptr), *ptr, _val); \ 3749 3771 } while (0) 3750 3772 3751 - #ifdef CONFIG_SMP 3752 3773 static inline void 3753 3774 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3754 3775 { ··· 3764 3787 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, 3765 3788 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); 3766 3789 } 3767 - #else /* !CONFIG_SMP: */ 3768 - static inline void 3769 - enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 3770 - static inline void 3771 - dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 3772 - #endif /* !CONFIG_SMP */ 3773 3790 3774 3791 static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags); 3775 3792 ··· 3795 3824 3796 3825 update_load_set(&se->load, weight); 3797 3826 3798 - #ifdef CONFIG_SMP 3799 3827 do { 3800 3828 u32 divider = get_pelt_divider(&se->avg); 3801 3829 3802 3830 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); 3803 3831 } while (0); 3804 - #endif 3805 3832 3806 3833 enqueue_load_avg(cfs_rq, se); 3807 3834 if (se->on_rq) { ··· 3834 3865 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); 3835 3866 3836 3867 #ifdef CONFIG_FAIR_GROUP_SCHED 3837 - #ifdef CONFIG_SMP 3838 3868 /* 3839 3869 * All this does is approximate the hierarchical proportion which includes that 3840 3870 * global sum we all love to hate. 
··· 3940 3972 */ 3941 3973 return clamp_t(long, shares, MIN_SHARES, tg_shares); 3942 3974 } 3943 - #endif /* CONFIG_SMP */ 3944 3975 3945 3976 /* 3946 3977 * Recomputes the group entity based on the current state of its group ··· 3960 3993 if (throttled_hierarchy(gcfs_rq)) 3961 3994 return; 3962 3995 3963 - #ifndef CONFIG_SMP 3964 - shares = READ_ONCE(gcfs_rq->tg->shares); 3965 - #else 3966 3996 shares = calc_group_shares(gcfs_rq); 3967 - #endif 3968 3997 if (unlikely(se->load.weight != shares)) 3969 3998 reweight_entity(cfs_rq_of(se), se, shares); 3970 3999 } ··· 3994 4031 } 3995 4032 } 3996 4033 3997 - #ifdef CONFIG_SMP 3998 4034 static inline bool load_avg_is_decayed(struct sched_avg *sa) 3999 4035 { 4000 4036 if (sa->load_sum) ··· 5108 5146 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); 5109 5147 } 5110 5148 5111 - #else /* !CONFIG_SMP: */ 5112 - 5113 - static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 5114 - { 5115 - return !cfs_rq->nr_queued; 5116 - } 5117 - 5118 - #define UPDATE_TG 0x0 5119 - #define SKIP_AGE_LOAD 0x0 5120 - #define DO_ATTACH 0x0 5121 - #define DO_DETACH 0x0 5122 - 5123 - static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) 5124 - { 5125 - cfs_rq_util_change(cfs_rq, 0); 5126 - } 5127 - 5128 - static inline void remove_entity_load_avg(struct sched_entity *se) {} 5129 - 5130 - static inline void 5131 - attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 5132 - static inline void 5133 - detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 5134 - 5135 - static inline int sched_balance_newidle(struct rq *rq, struct rq_flags *rf) 5136 - { 5137 - return 0; 5138 - } 5139 - 5140 - static inline void 5141 - util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 5142 - 5143 - static inline void 5144 - util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 5145 - 5146 - static inline void 5147 - util_est_update(struct 
cfs_rq *cfs_rq, struct task_struct *p, 5148 - bool task_sleep) {} 5149 - static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} 5150 - 5151 - #endif /* !CONFIG_SMP */ 5152 - 5153 5149 void __setparam_fair(struct task_struct *p, const struct sched_attr *attr) 5154 5150 { 5155 5151 struct sched_entity *se = &p->se; ··· 6010 6090 resched_curr(rq); 6011 6091 } 6012 6092 6013 - #ifdef CONFIG_SMP 6014 6093 static void __cfsb_csd_unthrottle(void *arg) 6015 6094 { 6016 6095 struct cfs_rq *cursor, *tmp; ··· 6068 6149 if (first) 6069 6150 smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd); 6070 6151 } 6071 - #else /* !CONFIG_SMP: */ 6072 - static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) 6073 - { 6074 - unthrottle_cfs_rq(cfs_rq); 6075 - } 6076 - #endif /* !CONFIG_SMP */ 6077 6152 6078 6153 static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) 6079 6154 { ··· 6523 6610 * guaranteed at this point that no additional cfs_rq of this group can 6524 6611 * join a CSD list. 
6525 6612 */ 6526 - #ifdef CONFIG_SMP 6527 6613 for_each_possible_cpu(i) { 6528 6614 struct rq *rq = cpu_rq(i); 6529 6615 unsigned long flags; ··· 6534 6622 __cfsb_csd_unthrottle(rq); 6535 6623 local_irq_restore(flags); 6536 6624 } 6537 - #endif 6538 6625 } 6539 6626 6540 6627 /* ··· 6746 6835 } 6747 6836 #endif /* !CONFIG_SCHED_HRTICK */ 6748 6837 6749 - #ifdef CONFIG_SMP 6750 6838 static inline bool cpu_overutilized(int cpu) 6751 6839 { 6752 6840 unsigned long rq_util_min, rq_util_max; ··· 6787 6877 if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu)) 6788 6878 set_rd_overutilized(rq->rd, 1); 6789 6879 } 6790 - #else /* !CONFIG_SMP: */ 6791 - static inline void check_update_overutilized_status(struct rq *rq) { } 6792 - #endif /* !CONFIG_SMP */ 6793 6880 6794 6881 /* Runqueue only has SCHED_IDLE tasks enqueued */ 6795 6882 static int sched_idle_rq(struct rq *rq) ··· 6795 6888 rq->nr_running); 6796 6889 } 6797 6890 6798 - #ifdef CONFIG_SMP 6799 6891 static int sched_idle_cpu(int cpu) 6800 6892 { 6801 6893 return sched_idle_rq(cpu_rq(cpu)); 6802 6894 } 6803 - #endif 6804 6895 6805 6896 static void 6806 6897 requeue_delayed_entity(struct sched_entity *se) ··· 7112 7207 { 7113 7208 return (rq->cfs.h_nr_queued - rq->cfs.h_nr_runnable); 7114 7209 } 7115 - 7116 - #ifdef CONFIG_SMP 7117 7210 7118 7211 /* Working cpumask for: sched_balance_rq(), sched_balance_newidle(). */ 7119 7212 static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); ··· 8648 8745 8649 8746 return sched_balance_newidle(rq, rf) != 0; 8650 8747 } 8651 - #else /* !CONFIG_SMP: */ 8652 - static inline void set_task_max_allowed_capacity(struct task_struct *p) {} 8653 - #endif /* !CONFIG_SMP */ 8654 8748 8655 8749 static void set_next_buddy(struct sched_entity *se) 8656 8750 { ··· 8957 9057 return true; 8958 9058 } 8959 9059 8960 - #ifdef CONFIG_SMP 8961 9060 /************************************************** 8962 9061 * Fair scheduling class load-balancing methods. 
8963 9062 * ··· 12879 12980 clear_tg_offline_cfs_rqs(rq); 12880 12981 } 12881 12982 12882 - #endif /* CONFIG_SMP */ 12883 - 12884 12983 #ifdef CONFIG_SCHED_CORE 12885 12984 static inline bool 12886 12985 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) ··· 13106 13209 { 13107 13210 struct cfs_rq *cfs_rq = cfs_rq_of(se); 13108 13211 13109 - #ifdef CONFIG_SMP 13110 13212 /* 13111 13213 * In case the task sched_avg hasn't been attached: 13112 13214 * - A forked task which hasn't been woken up by wake_up_new_task(). ··· 13114 13218 */ 13115 13219 if (!se->avg.last_update_time) 13116 13220 return; 13117 - #endif 13118 13221 13119 13222 /* Catch up with the cfs_rq and remove our load when we leave */ 13120 13223 update_load_avg(cfs_rq, se, 0); ··· 13177 13282 { 13178 13283 struct sched_entity *se = &p->se; 13179 13284 13180 - #ifdef CONFIG_SMP 13181 13285 if (task_on_rq_queued(p)) { 13182 13286 /* 13183 13287 * Move the next running task to the front of the list, so our ··· 13184 13290 */ 13185 13291 list_move(&se->group_node, &rq->cfs_tasks); 13186 13292 } 13187 - #endif 13188 13293 if (!first) 13189 13294 return; 13190 13295 ··· 13221 13328 { 13222 13329 cfs_rq->tasks_timeline = RB_ROOT_CACHED; 13223 13330 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); 13224 - #ifdef CONFIG_SMP 13225 13331 raw_spin_lock_init(&cfs_rq->removed.lock); 13226 - #endif 13227 13332 } 13228 13333 13229 13334 #ifdef CONFIG_FAIR_GROUP_SCHED ··· 13236 13345 13237 13346 detach_task_cfs_rq(p); 13238 13347 13239 - #ifdef CONFIG_SMP 13240 13348 /* Tell se's cfs_rq has been changed -- migrated */ 13241 13349 p->se.avg.last_update_time = 0; 13242 - #endif 13243 13350 set_task_rq(p, task_cpu(p)); 13244 13351 attach_task_cfs_rq(p); 13245 13352 } ··· 13533 13644 .put_prev_task = put_prev_task_fair, 13534 13645 .set_next_task = set_next_task_fair, 13535 13646 13536 - #ifdef CONFIG_SMP 13537 13647 .balance = balance_fair, 13538 13648 .select_task_rq = select_task_rq_fair, 13539 13649 
.migrate_task_rq = migrate_task_rq_fair, ··· 13542 13654 13543 13655 .task_dead = task_dead_fair, 13544 13656 .set_cpus_allowed = set_cpus_allowed_fair, 13545 - #endif 13546 13657 13547 13658 .task_tick = task_tick_fair, 13548 13659 .task_fork = task_fork_fair, ··· 13604 13717 13605 13718 __init void init_sched_fair_class(void) 13606 13719 { 13607 - #ifdef CONFIG_SMP 13608 13720 int i; 13609 13721 13610 13722 for_each_possible_cpu(i) { ··· 13625 13739 nohz.next_blocked = jiffies; 13626 13740 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); 13627 13741 #endif 13628 - #endif /* CONFIG_SMP */ 13629 13742 }
-52
kernel/sched/pelt.h
··· 3 3 #define _KERNEL_SCHED_PELT_H 4 4 #include "sched.h" 5 5 6 - #ifdef CONFIG_SMP 7 6 #include "sched-pelt.h" 8 7 9 8 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se); ··· 185 186 return rq_clock_pelt(rq_of(cfs_rq)); 186 187 } 187 188 #endif /* !CONFIG_CFS_BANDWIDTH */ 188 - 189 - #else /* !CONFIG_SMP: */ 190 - 191 - static inline int 192 - update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 193 - { 194 - return 0; 195 - } 196 - 197 - static inline int 198 - update_rt_rq_load_avg(u64 now, struct rq *rq, int running) 199 - { 200 - return 0; 201 - } 202 - 203 - static inline int 204 - update_dl_rq_load_avg(u64 now, struct rq *rq, int running) 205 - { 206 - return 0; 207 - } 208 - 209 - static inline int 210 - update_hw_load_avg(u64 now, struct rq *rq, u64 capacity) 211 - { 212 - return 0; 213 - } 214 - 215 - static inline u64 hw_load_avg(struct rq *rq) 216 - { 217 - return 0; 218 - } 219 - 220 - static inline int 221 - update_irq_load_avg(struct rq *rq, u64 running) 222 - { 223 - return 0; 224 - } 225 - 226 - static inline u64 rq_clock_pelt(struct rq *rq) 227 - { 228 - return rq_clock_task(rq); 229 - } 230 - 231 - static inline void 232 - update_rq_clock_pelt(struct rq *rq, s64 delta) { } 233 - 234 - static inline void 235 - update_idle_rq_clock_pelt(struct rq *rq) { } 236 - 237 - static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { } 238 - #endif /* !CONFIG_SMP */ 239 189 240 190 #endif /* _KERNEL_SCHED_PELT_H */
+5 -1
kernel/sched/rt.c
··· 2454 2454 GFP_KERNEL, cpu_to_node(i)); 2455 2455 } 2456 2456 } 2457 - #endif /* CONFIG_SMP */ 2457 + #else /* !CONFIG_SMP: */ 2458 + void __init init_sched_rt_class(void) 2459 + { 2460 + } 2461 + #endif /* !CONFIG_SMP */ 2458 2462 2459 2463 /* 2460 2464 * When switching a task to RT, we may overload the runqueue
+3 -118
kernel/sched/sched.h
··· 443 443 /* runqueue "owned" by this group on each CPU */ 444 444 struct cfs_rq **cfs_rq; 445 445 unsigned long shares; 446 - #ifdef CONFIG_SMP 447 446 /* 448 447 * load_avg can be heavily contended at clock tick time, so put 449 448 * it in its own cache-line separated from the fields above which 450 449 * will also be accessed at each tick. 451 450 */ 452 451 atomic_long_t load_avg ____cacheline_aligned; 453 - #endif /* CONFIG_SMP */ 454 452 #endif /* CONFIG_FAIR_GROUP_SCHED */ 455 453 456 454 #ifdef CONFIG_RT_GROUP_SCHED ··· 572 574 573 575 extern int sched_group_set_idle(struct task_group *tg, long idle); 574 576 575 - #ifdef CONFIG_SMP 576 577 extern void set_task_rq_fair(struct sched_entity *se, 577 578 struct cfs_rq *prev, struct cfs_rq *next); 578 - #else /* !CONFIG_SMP: */ 579 - static inline void set_task_rq_fair(struct sched_entity *se, 580 - struct cfs_rq *prev, struct cfs_rq *next) { } 581 - #endif /* !CONFIG_SMP */ 582 579 #else /* !CONFIG_FAIR_GROUP_SCHED: */ 583 580 static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) { return 0; } 584 581 static inline int sched_group_set_idle(struct task_group *tg, long idle) { return 0; } ··· 661 668 struct sched_entity *curr; 662 669 struct sched_entity *next; 663 670 664 - #ifdef CONFIG_SMP 665 671 /* 666 672 * CFS load tracking 667 673 */ ··· 692 700 u64 last_h_load_update; 693 701 struct sched_entity *h_load_next; 694 702 #endif /* CONFIG_FAIR_GROUP_SCHED */ 695 - #endif /* CONFIG_SMP */ 696 703 697 704 #ifdef CONFIG_FAIR_GROUP_SCHED 698 705 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ ··· 788 797 struct rt_prio_array active; 789 798 unsigned int rt_nr_running; 790 799 unsigned int rr_nr_running; 791 - #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 792 800 struct { 793 801 int curr; /* highest queued rt task prio */ 794 - #ifdef CONFIG_SMP 795 802 int next; /* next highest */ 796 - #endif 797 803 } highest_prio; 798 - #endif 799 804 #ifdef 
CONFIG_SMP 800 805 bool overloaded; 801 806 struct plist_head pushable_tasks; ··· 827 840 828 841 unsigned int dl_nr_running; 829 842 830 - #ifdef CONFIG_SMP 831 843 /* 832 844 * Deadline values of the currently executing and the 833 845 * earliest ready task on this rq. Caching these facilitates ··· 846 860 * of the leftmost (earliest deadline) element. 847 861 */ 848 862 struct rb_root_cached pushable_dl_tasks_root; 849 - #else /* !CONFIG_SMP: */ 850 - struct dl_bw dl_bw; 851 - #endif /* !CONFIG_SMP */ 863 + 852 864 /* 853 865 * "Active utilization" for this runqueue: increased when a 854 866 * task wakes up (becomes TASK_RUNNING) and decreased when a ··· 917 933 918 934 #endif /* !CONFIG_FAIR_GROUP_SCHED */ 919 935 920 - #ifdef CONFIG_SMP 921 936 /* 922 937 * XXX we want to get rid of these helpers and use the full load resolution. 923 938 */ ··· 1027 1044 #ifdef HAVE_RT_PUSH_IPI 1028 1045 extern void rto_push_irq_work_func(struct irq_work *work); 1029 1046 #endif 1030 - #endif /* CONFIG_SMP */ 1031 1047 1032 1048 #ifdef CONFIG_UCLAMP_TASK 1033 1049 /* ··· 1090 1108 unsigned int numa_migrate_on; 1091 1109 #endif 1092 1110 #ifdef CONFIG_NO_HZ_COMMON 1093 - #ifdef CONFIG_SMP 1094 1111 unsigned long last_blocked_load_update_tick; 1095 1112 unsigned int has_blocked_load; 1096 1113 call_single_data_t nohz_csd; 1097 - #endif /* CONFIG_SMP */ 1098 1114 unsigned int nohz_tick_stopped; 1099 1115 atomic_t nohz_flags; 1100 1116 #endif /* CONFIG_NO_HZ_COMMON */ 1101 1117 1102 - #ifdef CONFIG_SMP 1103 1118 unsigned int ttwu_pending; 1104 - #endif 1105 1119 u64 nr_switches; 1106 1120 1107 1121 #ifdef CONFIG_UCLAMP_TASK ··· 1162 1184 int membarrier_state; 1163 1185 #endif 1164 1186 1165 - #ifdef CONFIG_SMP 1166 1187 struct root_domain *rd; 1167 1188 struct sched_domain __rcu *sd; 1168 1189 ··· 1202 1225 #ifdef CONFIG_HOTPLUG_CPU 1203 1226 struct rcuwait hotplug_wait; 1204 1227 #endif 1205 - #endif /* CONFIG_SMP */ 1206 1228 1207 1229 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 1208 1230 
u64 prev_irq_time; ··· 1248 1272 struct cpuidle_state *idle_state; 1249 1273 #endif 1250 1274 1251 - #ifdef CONFIG_SMP 1252 1275 unsigned int nr_pinned; 1253 - #endif 1254 1276 unsigned int push_busy; 1255 1277 struct cpu_stop_work push_work; 1256 1278 ··· 1274 1300 /* Scratch cpumask to be temporarily used under rq_lock */ 1275 1301 cpumask_var_t scratch_mask; 1276 1302 1277 - #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP) 1303 + #ifdef CONFIG_CFS_BANDWIDTH 1278 1304 call_single_data_t cfsb_csd; 1279 1305 struct list_head cfsb_csd_list; 1280 1306 #endif ··· 1937 1963 1938 1964 #endif /* !CONFIG_NUMA_BALANCING */ 1939 1965 1940 - #ifdef CONFIG_SMP 1941 - 1942 1966 static inline void 1943 1967 queue_balance_callback(struct rq *rq, 1944 1968 struct balance_callback *head, ··· 2101 2129 return cpu_possible_mask; /* &init_task.cpus_mask */ 2102 2130 return p->user_cpus_ptr; 2103 2131 } 2104 - 2105 - #endif /* CONFIG_SMP */ 2106 2132 2107 2133 #ifdef CONFIG_CGROUP_SCHED 2108 2134 ··· 2388 2418 void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next); 2389 2419 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 2390 2420 2391 - #ifdef CONFIG_SMP 2392 2421 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); 2393 2422 2394 2423 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); ··· 2400 2431 void (*rq_offline)(struct rq *rq); 2401 2432 2402 2433 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); 2403 - #endif /* CONFIG_SMP */ 2404 2434 2405 2435 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 2406 2436 void (*task_fork)(struct task_struct *p); ··· 2551 2583 #define SCA_MIGRATE_ENABLE 0x04 2552 2584 #define SCA_USER 0x08 2553 2585 2554 - #ifdef CONFIG_SMP 2555 - 2556 2586 extern void update_group_capacity(struct sched_domain *sd, int cpu); 2557 2587 2558 2588 extern void sched_balance_trigger(struct rq *rq); ··· 2601 2635 } 2602 2636 2603 2637 extern 
int push_cpu_stop(void *arg); 2604 - 2605 - #else /* !CONFIG_SMP: */ 2606 - 2607 - static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu) 2608 - { 2609 - return true; 2610 - } 2611 - 2612 - static inline int __set_cpus_allowed_ptr(struct task_struct *p, 2613 - struct affinity_context *ctx) 2614 - { 2615 - return set_cpus_allowed_ptr(p, ctx->new_mask); 2616 - } 2617 - 2618 - static inline cpumask_t *alloc_user_cpus_ptr(int node) 2619 - { 2620 - return NULL; 2621 - } 2622 - 2623 - #endif /* !CONFIG_SMP */ 2624 2638 2625 2639 #ifdef CONFIG_CPU_IDLE 2626 2640 ··· 2878 2932 { class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \ 2879 2933 _lock; return _t; } 2880 2934 2881 - #ifdef CONFIG_SMP 2882 - 2883 2935 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) 2884 2936 { 2885 2937 #ifdef CONFIG_SCHED_CORE ··· 3037 3093 3038 3094 extern bool sched_smp_initialized; 3039 3095 3040 - #else /* !CONFIG_SMP: */ 3041 - 3042 - /* 3043 - * double_rq_lock - safely lock two runqueues 3044 - * 3045 - * Note this does not disable interrupts like task_rq_lock, 3046 - * you need to do so manually before calling. 3047 - */ 3048 - static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 3049 - __acquires(rq1->lock) 3050 - __acquires(rq2->lock) 3051 - { 3052 - WARN_ON_ONCE(!irqs_disabled()); 3053 - WARN_ON_ONCE(rq1 != rq2); 3054 - raw_spin_rq_lock(rq1); 3055 - __acquire(rq2->lock); /* Fake it out ;) */ 3056 - double_rq_clock_clear_update(rq1, rq2); 3057 - } 3058 - 3059 - /* 3060 - * double_rq_unlock - safely unlock two runqueues 3061 - * 3062 - * Note this does not restore interrupts like task_rq_unlock, 3063 - * you need to do so manually after calling. 
3064 - */ 3065 - static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 3066 - __releases(rq1->lock) 3067 - __releases(rq2->lock) 3068 - { 3069 - WARN_ON_ONCE(rq1 != rq2); 3070 - raw_spin_rq_unlock(rq1); 3071 - __release(rq2->lock); 3072 - } 3073 - 3074 - #endif /* !CONFIG_SMP */ 3075 - 3076 3096 DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq, 3077 3097 double_rq_lock(_T->lock, _T->lock2), 3078 3098 double_rq_unlock(_T->lock, _T->lock2)) ··· 3095 3187 static inline void nohz_balance_exit_idle(struct rq *rq) { } 3096 3188 #endif /* !CONFIG_NO_HZ_COMMON */ 3097 3189 3098 - #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 3190 + #ifdef CONFIG_NO_HZ_COMMON 3099 3191 extern void nohz_run_idle_balance(int cpu); 3100 3192 #else 3101 3193 static inline void nohz_run_idle_balance(int cpu) { } ··· 3221 3313 # define arch_scale_freq_invariant() false 3222 3314 #endif 3223 3315 3224 - #ifdef CONFIG_SMP 3225 - 3226 3316 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 3227 3317 unsigned long *min, 3228 3318 unsigned long *max); ··· 3263 3357 { 3264 3358 return READ_ONCE(rq->avg_rt.util_avg); 3265 3359 } 3266 - 3267 - #else /* !CONFIG_SMP: */ 3268 - static inline bool update_other_load_avgs(struct rq *rq) { return false; } 3269 - #endif /* !CONFIG_SMP */ 3270 3360 3271 3361 #ifdef CONFIG_UCLAMP_TASK 3272 3362 ··· 3482 3580 3483 3581 #endif /* !CONFIG_MEMBARRIER */ 3484 3582 3485 - #ifdef CONFIG_SMP 3486 3583 static inline bool is_per_cpu_kthread(struct task_struct *p) 3487 3584 { 3488 3585 if (!(p->flags & PF_KTHREAD)) ··· 3492 3591 3493 3592 return true; 3494 3593 } 3495 - #endif /* CONFIG_SMP */ 3496 3594 3497 3595 extern void swake_up_all_locked(struct swait_queue_head *q); 3498 3596 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); ··· 3790 3890 3791 3891 extern u64 avg_vruntime(struct cfs_rq *cfs_rq); 3792 3892 extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); 3793 - #ifdef 
CONFIG_SMP 3794 3893 static inline 3795 3894 void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task) 3796 3895 { ··· 3810 3911 3811 3912 return false; 3812 3913 } 3813 - #endif /* CONFIG_SMP */ 3814 3914 3815 3915 #ifdef CONFIG_RT_MUTEXES 3816 3916 ··· 3850 3952 const struct sched_class *prev_class, 3851 3953 int oldprio); 3852 3954 3853 - #ifdef CONFIG_SMP 3854 3955 extern struct balance_callback *splice_balance_callbacks(struct rq *rq); 3855 3956 extern void balance_callbacks(struct rq *rq, struct balance_callback *head); 3856 - #else /* !CONFIG_SMP: */ 3857 - 3858 - static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) 3859 - { 3860 - return NULL; 3861 - } 3862 - 3863 - static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) 3864 - { 3865 - } 3866 - 3867 - #endif /* !CONFIG_SMP */ 3868 3957 3869 3958 #ifdef CONFIG_SCHED_CLASS_EXT 3870 3959 /*
-2
kernel/sched/syscalls.c
··· 1119 1119 return copy_struct_to_user(uattr, usize, &kattr, sizeof(kattr), NULL); 1120 1120 } 1121 1121 1122 - #ifdef CONFIG_SMP 1123 1122 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 1124 1123 { 1125 1124 /* ··· 1147 1148 1148 1149 return 0; 1149 1150 } 1150 - #endif /* CONFIG_SMP */ 1151 1151 1152 1152 int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) 1153 1153 {
+2 -8
kernel/sched/topology.c
··· 17 17 mutex_unlock(&sched_domains_mutex); 18 18 } 19 19 20 - #ifdef CONFIG_SMP 21 - 22 20 /* Protected by sched_domains_mutex: */ 23 21 static cpumask_var_t sched_domains_tmpmask; 24 22 static cpumask_var_t sched_domains_tmpmask2; ··· 1320 1322 update_group_capacity(sd, cpu); 1321 1323 } 1322 1324 1323 - #ifdef CONFIG_SMP 1324 - 1325 1325 /* Update the "asym_prefer_cpu" when arch_asym_cpu_priority() changes. */ 1326 1326 void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio) 1327 1327 { 1328 + #ifdef CONFIG_SMP 1328 1329 int asym_prefer_cpu = cpu; 1329 1330 struct sched_domain *sd; 1330 1331 ··· 1373 1376 1374 1377 WRITE_ONCE(sg->asym_prefer_cpu, asym_prefer_cpu); 1375 1378 } 1376 - } 1377 - 1378 1379 #endif /* CONFIG_SMP */ 1380 + } 1379 1381 1380 1382 /* 1381 1383 * Set of available CPUs grouped by their corresponding capacities ··· 2840 2844 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); 2841 2845 sched_domains_mutex_unlock(); 2842 2846 } 2843 - 2844 - #endif /* CONFIG_SMP */