Merge tag 'sched_urgent_for_v5.16_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Borislav Petkov:

- Properly initialize the uclamp_flags of a runqueue so clamp values are
  reset on the first enqueue

- Fix preempt= callback return values

- Correct utime/stime resource usage reporting on nohz_full to return
the proper times instead of shorter ones

* tag 'sched_urgent_for_v5.16_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/uclamp: Fix rq->uclamp_max not set on first enqueue
preempt/dynamic: Fix setup_preempt_mode() return value
sched/cputime: Fix getrusage(RUSAGE_THREAD) with nohz_full

Changed files:

 include/linux/sched/cputime.h |  5 +++--
 kernel/sched/core.c           |  6 +++---
 kernel/sched/cputime.c        | 12 +++++++++---
 3 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -18,15 +18,16 @@
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
 			 u64 *utime, u64 *stime);
 extern u64 task_gtime(struct task_struct *t);
 #else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
 				u64 *utime, u64 *stime)
 {
 	*utime = t->utime;
 	*stime = t->stime;
+	return false;
 }
 
 static inline u64 task_gtime(struct task_struct *t)
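With this change, task_cputime() reports whether the returned utime/stime
pair had a pending nohz vtime delta folded in. A minimal sketch of the
intended calling pattern follows; the caller function is hypothetical and
not part of this merge, it only mirrors what task_cputime_adjusted() now
does: a true return means any runtime snapshot taken before the call is
stale and should be resampled via task_sched_runtime().

/*
 * Hypothetical in-kernel caller, for illustration only: when
 * task_cputime() returns true, a vtime delta was added to utime/stime,
 * so refresh the runtime snapshot to keep all three values consistent.
 */
static void sample_thread_times(struct task_struct *p)
{
	u64 utime, stime;
	u64 runtime = p->se.sum_exec_runtime;	/* may be stale on nohz_full */

	if (task_cputime(p, &utime, &stime))
		runtime = task_sched_runtime(p);	/* resample with pending delta */

	/* utime, stime and runtime are now mutually consistent */
}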
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1918,7 +1918,7 @@
 		};
 	}
 
-	rq->uclamp_flags = 0;
+	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
 }
 
 static void __init init_uclamp(void)
@@ -6617,11 +6617,11 @@
 	int mode = sched_dynamic_mode(str);
 	if (mode < 0) {
 		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
-		return 1;
+		return 0;
 	}
 
 	sched_dynamic_update(mode);
-	return 0;
+	return 1;
 }
 __setup("preempt=", setup_preempt_mode);
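The preempt= fix hinges on the __setup() return convention, which is easy
to invert: a handler returns 1 to mark the boot parameter as consumed, and
0 to leave it unrecognized, in which case the kernel treats it as an
unknown parameter and passes it along to init. setup_preempt_mode() had
the two values swapped. A minimal sketch of the convention, using a
hypothetical "foo=" parameter that is not part of this merge:

#include <linux/init.h>
#include <linux/kernel.h>

static int foo_value __initdata;

/* Hypothetical boot parameter handler illustrating the convention. */
static int __init setup_foo(char *str)
{
	/* Malformed value: return 0 so "foo=" is reported as unknown. */
	if (!str || kstrtoint(str, 10, &foo_value))
		return 0;

	/* Consumed: return 1 so "foo=" is not passed on to init. */
	return 1;
}
__setup("foo=", setup_foo);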
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -615,7 +615,8 @@
 		.sum_exec_runtime = p->se.sum_exec_runtime,
 	};
 
-	task_cputime(p, &cputime.utime, &cputime.stime);
+	if (task_cputime(p, &cputime.utime, &cputime.stime))
+		cputime.sum_exec_runtime = task_sched_runtime(p);
 	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
@@ -828,19 +829,21 @@
  * add up the pending nohz execution time since the last
  * cputime snapshot.
  */
-void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
+bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
 {
 	struct vtime *vtime = &t->vtime;
 	unsigned int seq;
 	u64 delta;
+	int ret;
 
 	if (!vtime_accounting_enabled()) {
 		*utime = t->utime;
 		*stime = t->stime;
-		return;
+		return false;
 	}
 
 	do {
+		ret = false;
 		seq = read_seqcount_begin(&vtime->seqcount);
 
 		*utime = t->utime;
@@ -850,6 +853,7 @@
 		if (vtime->state < VTIME_SYS)
 			continue;
 
+		ret = true;
 		delta = vtime_delta(vtime);
 
 		/*
@@ -861,6 +865,8 @@
 	else
 		*utime += vtime->utime + delta;
 	} while (read_seqcount_retry(&vtime->seqcount, seq));
+
+	return ret;
 }
 
 static int vtime_state_fetch(struct vtime *vtime, int cpu)
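This fix is observable from userspace: before it, getrusage(RUSAGE_THREAD)
on a nohz_full CPU could return utime/stime missing the time accumulated
since the last vtime update, i.e. shorter values than the thread actually
ran. A rough userspace demonstration follows, a sketch assuming Linux with
glibc; to see the pre-fix discrepancy it would need to be pinned to a
nohz_full CPU:

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <sys/resource.h>

int main(void)
{
	struct timespec start, now;
	struct rusage ru;
	volatile unsigned long spin = 0;

	/* Burn roughly two seconds of pure user time. */
	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		spin++;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (now.tv_sec - start.tv_sec < 2);

	if (getrusage(RUSAGE_THREAD, &ru)) {
		perror("getrusage");
		return 1;
	}

	/*
	 * With the fix, utime covers the whole busy loop even on nohz_full;
	 * before it, the pending vtime delta could be missing.
	 */
	printf("utime %ld.%06lds, stime %ld.%06lds\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
	return 0;
}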