Merge tag 'sched-urgent-2026-01-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:

- Fix a PELT clock synchronization bug when entering idle

- Disable the NEXT_BUDDY scheduler feature, as Mel found during
  extensive testing that its negatives outweigh its positives (a toy
  sketch of the buddy mechanism follows this list)

- Make wakeup preemption less aggressive again, by reverting the
  force wakeup preemption change, which had resulted in an
  unreasonable increase in preemption frequency
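
For context, here is a toy, userspace-only model of the NEXT_BUDDY
idea: at wakeup the wakee can be cached as a "next" hint, and the
picker prefers it over a plain earliest-deadline pick while it stays
eligible. The structures, the eligibility test and pick_next() below
are simplified assumptions for illustration, not the kernel's actual
implementation:

  #include <stdio.h>
  #include <stddef.h>

  struct toy_entity {
          const char *name;
          long vruntime;   /* virtual runtime consumed so far */
          long deadline;   /* virtual deadline used for picking */
  };

  struct toy_cfs_rq {
          struct toy_entity *tasks;
          int nr;
          struct toy_entity *next;  /* the "next buddy" wakeup hint */
          long avg_vruntime;        /* stand-in eligibility cutoff */
  };

  /* Simplified eligibility: an entity is eligible if it has not run
   * ahead of the queue average (the kernel's test is more involved). */
  static int eligible(struct toy_cfs_rq *rq, struct toy_entity *se)
  {
          return se->vruntime <= rq->avg_vruntime;
  }

  static struct toy_entity *pick_next(struct toy_cfs_rq *rq)
  {
          struct toy_entity *best = NULL;
          int i;

          /* NEXT_BUDDY: honor the wakeup hint while still eligible. */
          if (rq->next && eligible(rq, rq->next))
                  return rq->next;

          /* Fallback: earliest virtual deadline among eligible tasks. */
          for (i = 0; i < rq->nr; i++) {
                  struct toy_entity *se = &rq->tasks[i];

                  if (!eligible(rq, se))
                          continue;
                  if (!best || se->deadline < best->deadline)
                          best = se;
          }
          return best;
  }

  int main(void)
  {
          struct toy_entity tasks[] = {
                  { "batch",  90, 200 },
                  { "wakee", 100, 250 },
          };
          struct toy_cfs_rq rq = { tasks, 2, &tasks[1], 100 };

          printf("picked: %s\n", pick_next(&rq)->name); /* wakee, via buddy */
          rq.next = NULL;
          printf("picked: %s\n", pick_next(&rq)->name); /* batch, by deadline */
          return 0;
  }

Disabling NEXT_BUDDY corresponds to never taking the hint branch, so
the pick always falls through to the deadline scan.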

* tag 'sched-urgent-2026-01-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/fair: Revert force wakeup preemption
sched/fair: Disable scheduler feature NEXT_BUDDY
sched/fair: Fix pelt clock sync when entering idle

 kernel/sched/fair.c     | 16 ----------------
 kernel/sched/features.h |  2 +-
 kernel/sched/idle.c     |  6 ++++++
 3 files changed, 7 insertions(+), 17 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8828,16 +8828,6 @@
 	if ((wake_flags & WF_FORK) || pse->sched_delayed)
 		return;
 
-	/*
-	 * If @p potentially is completing work required by current then
-	 * consider preemption.
-	 *
-	 * Reschedule if waker is no longer eligible. */
-	if (in_task() && !entity_eligible(cfs_rq, se)) {
-		preempt_action = PREEMPT_WAKEUP_RESCHED;
-		goto preempt;
-	}
-
 	/* Prefer picking wakee soon if appropriate. */
 	if (sched_feat(NEXT_BUDDY) &&
 	    set_preempt_buddy(cfs_rq, wake_flags, pse, se)) {
@@ -8984,12 +8974,6 @@
 		if (new_tasks > 0)
 			goto again;
 	}
-
-	/*
-	 * rq is about to be idle, check if we need to update the
-	 * lost_idle_time of clock_pelt
-	 */
-	update_idle_rq_clock_pelt(rq);
 
 	return NULL;
 }
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -29,7 +29,7 @@
  * wakeup-preemption), since its likely going to consume data we
  * touched, increases cache locality.
  */
-SCHED_FEAT(NEXT_BUDDY, true)
+SCHED_FEAT(NEXT_BUDDY, false)
 
 /*
  * Allow completely ignoring cfs_rq->next; which can be set from various
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -468,6 +468,12 @@
 	scx_update_idle(rq, true, true);
 	schedstat_inc(rq->sched_goidle);
 	next->se.exec_start = rq_clock_task(rq);
+
+	/*
+	 * rq is about to be idle, check if we need to update the
+	 * lost_idle_time of clock_pelt
+	 */
+	update_idle_rq_clock_pelt(rq);
 }
 
 struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
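
On the PELT side, the idle.c hunk above moves update_idle_rq_clock_pelt()
out of the tail of pick_next_task_fair() and into the idle class, tying
the lost-idle-time bookkeeping to the idle task actually being set up
rather than to CFS merely running out of tasks. A toy model of what that
bookkeeping tracks (the field names mirror the kernel's, but the capacity
scaling and arithmetic here are simplified assumptions):

  #include <stdio.h>

  struct toy_rq {
          unsigned long clock;          /* wall-ish task clock, ns */
          unsigned long clock_pelt;     /* capacity-scaled PELT clock, ns */
          unsigned long lost_idle_time; /* accumulated clock/clock_pelt gap */
  };

  /* Advance both clocks while busy; cap in [0,1024] scales PELT time,
   * so a CPU running below full capacity ages PELT more slowly. */
  static void tick_busy(struct toy_rq *rq, unsigned long delta,
                        unsigned long cap)
  {
          rq->clock += delta;
          rq->clock_pelt += delta * cap / 1024;
  }

  /* On entering idle: sync clock_pelt to the task clock and remember
   * the gap, so subsequent idle decay starts from a consistent point.
   * This mirrors the role of update_idle_rq_clock_pelt() in the toy. */
  static void enter_idle(struct toy_rq *rq)
  {
          rq->lost_idle_time += rq->clock - rq->clock_pelt;
          rq->clock_pelt = rq->clock;
  }

  int main(void)
  {
          struct toy_rq rq = { 0, 0, 0 };

          tick_busy(&rq, 1000, 512);  /* run 1000ns at half capacity */
          enter_idle(&rq);

          printf("clock=%lu clock_pelt=%lu lost_idle_time=%lu\n",
                 rq.clock, rq.clock_pelt, rq.lost_idle_time);
          return 0;
  }

The placement matters: syncing from a path that can run while the CPU
still has runnable work would fold busy time into lost_idle_time.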