Merge tag 'sched-urgent-2024-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:

- When stime is larger than rtime due to accounting imprecision,
  utime = rtime - stime becomes negative. As this is unsigned math, the
  result becomes a huge positive number.

  Cure it by resetting stime to rtime in that case, so utime becomes 0.

- Restore consistent state when sched_cpu_deactivate() fails.

  When offlining a CPU fails in sched_cpu_deactivate() after the SMT
  present counter has been decremented, the function aborts but fails
  to increment the SMT present counter again and leaves it imbalanced.
  Consecutive failed offline operations then cause the counter to
  underflow. Add the missing fixup for the error path.

  As with the SMT accounting, the runqueue needs to be marked online
  again in the error exit path to restore consistent state.

* tag 'sched-urgent-2024-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/core: Fix unbalance set_rq_online/offline() in sched_cpu_deactivate()
sched/core: Introduce sched_set_rq_on/offline() helper
sched/smt: Fix unbalance sched_smt_present dec/inc
sched/smt: Introduce sched_smt_present_inc/dec() helper
sched/cputime: Fix mul_u64_u64_div_u64() precision for cputime

Changed files (+53 -21):

kernel/sched/core.c (+47 -21)
···
 }
 }

+static inline void sched_set_rq_online(struct rq *rq, int cpu)
+{
+        struct rq_flags rf;
+
+        rq_lock_irqsave(rq, &rf);
+        if (rq->rd) {
+                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+                set_rq_online(rq);
+        }
+        rq_unlock_irqrestore(rq, &rf);
+}
+
+static inline void sched_set_rq_offline(struct rq *rq, int cpu)
+{
+        struct rq_flags rf;
+
+        rq_lock_irqsave(rq, &rf);
+        if (rq->rd) {
+                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+                set_rq_offline(rq);
+        }
+        rq_unlock_irqrestore(rq, &rf);
+}
+
 /*
  * used to mark begin/end of suspend/resume:
  */
···
         return 0;
 }

+static inline void sched_smt_present_inc(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+                static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+}
+
+static inline void sched_smt_present_dec(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+                static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+}
+
 int sched_cpu_activate(unsigned int cpu)
 {
         struct rq *rq = cpu_rq(cpu);
-        struct rq_flags rf;

         /*
          * Clear the balance_push callback and prepare to schedule
···
          */
         balance_push_set(cpu, false);

-#ifdef CONFIG_SCHED_SMT
         /*
          * When going up, increment the number of cores with SMT present.
          */
-        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-                static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
+        sched_smt_present_inc(cpu);
         set_cpu_active(cpu, true);

         if (sched_smp_initialized) {
···
          *
          * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
          *    domains.
          */
-        rq_lock_irqsave(rq, &rf);
-        if (rq->rd) {
-                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-                set_rq_online(rq);
-        }
-        rq_unlock_irqrestore(rq, &rf);
+        sched_set_rq_online(rq, cpu);

         return 0;
 }
···
 int sched_cpu_deactivate(unsigned int cpu)
 {
         struct rq *rq = cpu_rq(cpu);
-        struct rq_flags rf;
         int ret;

         /*
···
          */
         synchronize_rcu();

-        rq_lock_irqsave(rq, &rf);
-        if (rq->rd) {
-                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-                set_rq_offline(rq);
-        }
-        rq_unlock_irqrestore(rq, &rf);
+        sched_set_rq_offline(rq, cpu);

-#ifdef CONFIG_SCHED_SMT
         /*
          * When going down, decrement the number of cores with SMT present.
          */
-        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-                static_branch_dec_cpuslocked(&sched_smt_present);
+        sched_smt_present_dec(cpu);

+#ifdef CONFIG_SCHED_SMT
         sched_core_cpu_deactivate(cpu);
 #endif
···
         sched_update_numa(cpu, false);
         ret = cpuset_cpu_inactive(cpu);
         if (ret) {
+                sched_smt_present_inc(cpu);
+                sched_set_rq_online(rq, cpu);
                 balance_push_set(cpu, false);
                 set_cpu_active(cpu, true);
                 sched_update_numa(cpu, true);
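
The error-path fixup in the last hunk is the core of the fix: when
cpuset_cpu_inactive() fails, the SMT present count and the runqueue
online state that were already torn down get restored. A minimal
userspace model of that rebalancing follows; the names here
(smt_present, rq_online, fake_deactivate()) are purely illustrative
and are not kernel APIs:

/*
 * Standalone sketch of the imbalance and its rollback; not kernel code.
 * Build with: cc -o deactivate-model deactivate-model.c
 */
#include <stdio.h>
#include <stdbool.h>

static int smt_present;          /* stands in for the SMT present count */
static bool rq_online = true;    /* stands in for the runqueue state    */

static void smt_present_dec(void) { smt_present--; }
static void smt_present_inc(void) { smt_present++; }

/* Returns 0 on success, -1 when a later step (the cpuset update) fails. */
static int fake_deactivate(bool later_step_fails, bool with_fix)
{
        rq_online = false;
        smt_present_dec();

        if (later_step_fails) {
                if (with_fix) {
                        /* The error path must undo what was already done. */
                        smt_present_inc();
                        rq_online = true;
                }
                return -1;
        }
        return 0;
}

int main(void)
{
        /* Two consecutive failed offline attempts without the fixup ... */
        smt_present = 1;
        fake_deactivate(true, false);
        fake_deactivate(true, false);
        printf("without fixup: smt_present=%d rq_online=%d\n",
               smt_present, rq_online);

        /* ... versus the balanced error path. */
        smt_present = 1;
        rq_online = true;
        fake_deactivate(true, true);
        fake_deactivate(true, true);
        printf("with fixup:    smt_present=%d rq_online=%d\n",
               smt_present, rq_online);
        return 0;
}

Run twice without the fixup, the model's counter ends up at -1 with the
runqueue still marked offline, which is the underflow and inconsistent
state described above; with the fixup both stay balanced.
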
kernel/sched/cputime.c (+6 -0)
···
         }

         stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
+        /*
+         * Because mul_u64_u64_div_u64() can approximate on some
+         * architectures; enforce the constraint that: a*b/(b+c) <= a.
+         */
+        if (unlikely(stime > rtime))
+                stime = rtime;

 update:
         /*
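
The clamp added above enforces stime <= rtime before the caller computes
utime = rtime - stime on unsigned 64-bit values. A small standalone
sketch of the hazard, where scale_stime_approx() is only a stand-in for
a mul_u64_u64_div_u64() result that comes back slightly high and is not
the kernel helper:

/* Standalone sketch of the unsigned wraparound the clamp prevents. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Pretend the scaled product came back one tick too large. */
static uint64_t scale_stime_approx(uint64_t stime, uint64_t rtime,
                                   uint64_t total)
{
        return (stime * rtime) / total + 1;     /* imprecision stand-in */
}

int main(void)
{
        uint64_t stime = 100, utime = 0, rtime = 100;

        stime = scale_stime_approx(stime, rtime, stime + utime);

        /* Without the clamp: stime == 101 > rtime, the subtraction wraps. */
        printf("unclamped utime = %" PRIu64 "\n", rtime - stime);

        /* With the clamp from the hunk above, utime becomes 0. */
        if (stime > rtime)
                stime = rtime;
        printf("clamped utime   = %" PRIu64 "\n", rtime - stime);
        return 0;
}

Without the clamp the subtraction wraps to 2^64 - 1, the huge positive
number mentioned in the first bullet; with it, utime is reported as 0.
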