Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

sched/debug: Change SCHED_WARN_ON() to WARN_ON_ONCE()

The scheduler has this special SCHED_WARN_ON() facility that
depends on CONFIG_SCHED_DEBUG.

Since CONFIG_SCHED_DEBUG is getting removed, convert
SCHED_WARN_ON() to WARN_ON_ONCE().
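
For reference, the definition being removed from kernel/sched/sched.h
(see the sched.h hunk below) wrapped WARN_ONCE() under
CONFIG_SCHED_DEBUG, and otherwise compiled down to a no-op that still
evaluated its argument:

  #ifdef CONFIG_SCHED_DEBUG
  # define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
  #else
  # define SCHED_WARN_ON(x)	({ (void)(x), 0; })
  #endif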

Note that the warning output isn't 100% equivalent:

#define SCHED_WARN_ON(x) WARN_ONCE(x, #x)

Because SCHED_WARN_ON() would output the 'x' condition
as well, while WARN_ON_ONCE() will only show a backtrace.
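
For example, one of the converted call sites in kernel/sched/fair.c:

  -	SCHED_WARN_ON(!se->on_rq);	/* old: warning message carried "!se->on_rq" */
  +	WARN_ON_ONCE(!se->on_rq);	/* new: only file:line and a backtrace */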

Hopefully these are rare enough to not really matter.

If they do, we should probably introduce a new WARN_ON()
variant that outputs the condition in stringified form,
or improve WARN_ON() itself.
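
A minimal sketch of what such a variant could look like (the name
WARN_ON_ONCE_STR is made up here), built on the existing WARN_ONCE()
interface:

  /* Hypothetical: once-only warning that also prints the condition text */
  #define WARN_ON_ONCE_STR(x)	WARN_ONCE(x, "%s\n", #x)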

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250317104257.3496611-2-mingo@kernel.org

+56 -62
+12 -12
kernel/sched/core.c
··· 801 801 802 802 #ifdef CONFIG_SCHED_DEBUG 803 803 if (sched_feat(WARN_DOUBLE_CLOCK)) 804 - SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); 804 + WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED); 805 805 rq->clock_update_flags |= RQCF_UPDATED; 806 806 #endif 807 807 clock = sched_clock_cpu(cpu_of(rq)); ··· 1719 1719 1720 1720 bucket = &uc_rq->bucket[uc_se->bucket_id]; 1721 1721 1722 - SCHED_WARN_ON(!bucket->tasks); 1722 + WARN_ON_ONCE(!bucket->tasks); 1723 1723 if (likely(bucket->tasks)) 1724 1724 bucket->tasks--; 1725 1725 ··· 1739 1739 * Defensive programming: this should never happen. If it happens, 1740 1740 * e.g. due to future modification, warn and fix up the expected value. 1741 1741 */ 1742 - SCHED_WARN_ON(bucket->value > rq_clamp); 1742 + WARN_ON_ONCE(bucket->value > rq_clamp); 1743 1743 if (bucket->value >= rq_clamp) { 1744 1744 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); 1745 1745 uclamp_rq_set(rq, clamp_id, bkt_clamp); ··· 2121 2121 2122 2122 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 2123 2123 { 2124 - SCHED_WARN_ON(flags & DEQUEUE_SLEEP); 2124 + WARN_ON_ONCE(flags & DEQUEUE_SLEEP); 2125 2125 2126 2126 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); 2127 2127 ASSERT_EXCLUSIVE_WRITER(p->on_rq); ··· 2726 2726 * XXX do further audits, this smells like something putrid. 2727 2727 */ 2728 2728 if (ctx->flags & SCA_MIGRATE_DISABLE) 2729 - SCHED_WARN_ON(!p->on_cpu); 2729 + WARN_ON_ONCE(!p->on_cpu); 2730 2730 else 2731 2731 lockdep_assert_held(&p->pi_lock); 2732 2732 ··· 4195 4195 * - we're serialized against set_special_state() by virtue of 4196 4196 * it disabling IRQs (this allows not taking ->pi_lock). 4197 4197 */ 4198 - SCHED_WARN_ON(p->se.sched_delayed); 4198 + WARN_ON_ONCE(p->se.sched_delayed); 4199 4199 if (!ttwu_state_match(p, state, &success)) 4200 4200 goto out; 4201 4201 ··· 4489 4489 INIT_LIST_HEAD(&p->se.group_node); 4490 4490 4491 4491 /* A delayed task cannot be in clone(). */ 4492 - SCHED_WARN_ON(p->se.sched_delayed); 4492 + WARN_ON_ONCE(p->se.sched_delayed); 4493 4493 4494 4494 #ifdef CONFIG_FAIR_GROUP_SCHED 4495 4495 p->se.cfs_rq = NULL; ··· 5745 5745 * we are always sure that there is no proxy (only a 5746 5746 * single task is running). 5747 5747 */ 5748 - SCHED_WARN_ON(rq->curr != rq->donor); 5748 + WARN_ON_ONCE(rq->curr != rq->donor); 5749 5749 update_rq_clock(rq); 5750 5750 5751 5751 if (!is_idle_task(curr)) { ··· 5965 5965 preempt_count_set(PREEMPT_DISABLED); 5966 5966 } 5967 5967 rcu_sleep_check(); 5968 - SCHED_WARN_ON(ct_state() == CT_STATE_USER); 5968 + WARN_ON_ONCE(ct_state() == CT_STATE_USER); 5969 5969 5970 5970 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 5971 5971 ··· 6811 6811 * deadlock if the callback attempts to acquire a lock which is 6812 6812 * already acquired. 
6813 6813 */ 6814 - SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); 6814 + WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT); 6815 6815 6816 6816 /* 6817 6817 * If we are going to sleep and we have plugged IO queued, ··· 9249 9249 unsigned int clamps; 9250 9250 9251 9251 lockdep_assert_held(&uclamp_mutex); 9252 - SCHED_WARN_ON(!rcu_read_lock_held()); 9252 + WARN_ON_ONCE(!rcu_read_lock_held()); 9253 9253 9254 9254 css_for_each_descendant_pre(css, top_css) { 9255 9255 uc_parent = css_tg(css)->parent ··· 10584 10584 struct mm_struct *mm; 10585 10585 int weight, cpu; 10586 10586 10587 - SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work)); 10587 + WARN_ON_ONCE(t != container_of(work, struct task_struct, cid_work)); 10588 10588 10589 10589 work->next = work; /* Prevent double-add */ 10590 10590 if (t->flags & PF_EXITING)
+1 -1
kernel/sched/core_sched.c
··· 65 65 * a cookie until after we've removed it, we must have core scheduling 66 66 * enabled here. 67 67 */ 68 - SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq)); 68 + WARN_ON_ONCE((p->core_cookie || cookie) && !sched_core_enabled(rq)); 69 69 70 70 if (sched_core_enqueued(p)) 71 71 sched_core_dequeue(rq, p, DEQUEUE_SAVE);
+6 -6
kernel/sched/deadline.c
··· 249 249 250 250 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); 251 251 dl_rq->running_bw += dl_bw; 252 - SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ 253 - SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 252 + WARN_ON_ONCE(dl_rq->running_bw < old); /* overflow */ 253 + WARN_ON_ONCE(dl_rq->running_bw > dl_rq->this_bw); 254 254 /* kick cpufreq (see the comment in kernel/sched/sched.h). */ 255 255 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); 256 256 } ··· 262 262 263 263 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); 264 264 dl_rq->running_bw -= dl_bw; 265 - SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ 265 + WARN_ON_ONCE(dl_rq->running_bw > old); /* underflow */ 266 266 if (dl_rq->running_bw > old) 267 267 dl_rq->running_bw = 0; 268 268 /* kick cpufreq (see the comment in kernel/sched/sched.h). */ ··· 276 276 277 277 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); 278 278 dl_rq->this_bw += dl_bw; 279 - SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ 279 + WARN_ON_ONCE(dl_rq->this_bw < old); /* overflow */ 280 280 } 281 281 282 282 static inline ··· 286 286 287 287 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); 288 288 dl_rq->this_bw -= dl_bw; 289 - SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ 289 + WARN_ON_ONCE(dl_rq->this_bw > old); /* underflow */ 290 290 if (dl_rq->this_bw > old) 291 291 dl_rq->this_bw = 0; 292 - SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 292 + WARN_ON_ONCE(dl_rq->running_bw > dl_rq->this_bw); 293 293 } 294 294 295 295 static inline
+1 -1
kernel/sched/ext.c
··· 2341 2341 { 2342 2342 int cpu = cpu_of(rq); 2343 2343 2344 - SCHED_WARN_ON(task_cpu(p) == cpu); 2344 + WARN_ON_ONCE(task_cpu(p) == cpu); 2345 2345 2346 2346 /* 2347 2347 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
+29 -29
kernel/sched/fair.c
··· 399 399 400 400 static inline void assert_list_leaf_cfs_rq(struct rq *rq) 401 401 { 402 - SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list); 402 + WARN_ON_ONCE(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list); 403 403 } 404 404 405 405 /* Iterate through all leaf cfs_rq's on a runqueue */ ··· 696 696 { 697 697 s64 vlag, limit; 698 698 699 - SCHED_WARN_ON(!se->on_rq); 699 + WARN_ON_ONCE(!se->on_rq); 700 700 701 701 vlag = avg_vruntime(cfs_rq) - se->vruntime; 702 702 limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se); ··· 3317 3317 bool vma_pids_skipped; 3318 3318 bool vma_pids_forced = false; 3319 3319 3320 - SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); 3320 + WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work)); 3321 3321 3322 3322 work->next = work; 3323 3323 /* ··· 4036 4036 * Make sure that rounding and/or propagation of PELT values never 4037 4037 * break this. 4038 4038 */ 4039 - SCHED_WARN_ON(sa->load_avg || 4039 + WARN_ON_ONCE(sa->load_avg || 4040 4040 sa->util_avg || 4041 4041 sa->runnable_avg); 4042 4042 ··· 5460 5460 clear_buddies(cfs_rq, se); 5461 5461 5462 5462 if (flags & DEQUEUE_DELAYED) { 5463 - SCHED_WARN_ON(!se->sched_delayed); 5463 + WARN_ON_ONCE(!se->sched_delayed); 5464 5464 } else { 5465 5465 bool delay = sleep; 5466 5466 /* ··· 5470 5470 if (flags & DEQUEUE_SPECIAL) 5471 5471 delay = false; 5472 5472 5473 - SCHED_WARN_ON(delay && se->sched_delayed); 5473 + WARN_ON_ONCE(delay && se->sched_delayed); 5474 5474 5475 5475 if (sched_feat(DELAY_DEQUEUE) && delay && 5476 5476 !entity_eligible(cfs_rq, se)) { ··· 5551 5551 } 5552 5552 5553 5553 update_stats_curr_start(cfs_rq, se); 5554 - SCHED_WARN_ON(cfs_rq->curr); 5554 + WARN_ON_ONCE(cfs_rq->curr); 5555 5555 cfs_rq->curr = se; 5556 5556 5557 5557 /* ··· 5592 5592 if (sched_feat(PICK_BUDDY) && 5593 5593 cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) { 5594 5594 /* ->next will never be delayed */ 5595 - SCHED_WARN_ON(cfs_rq->next->sched_delayed); 5595 + WARN_ON_ONCE(cfs_rq->next->sched_delayed); 5596 5596 return cfs_rq->next; 5597 5597 } 5598 5598 ··· 5628 5628 /* in !on_rq case, update occurred at dequeue */ 5629 5629 update_load_avg(cfs_rq, prev, 0); 5630 5630 } 5631 - SCHED_WARN_ON(cfs_rq->curr != prev); 5631 + WARN_ON_ONCE(cfs_rq->curr != prev); 5632 5632 cfs_rq->curr = NULL; 5633 5633 } 5634 5634 ··· 5851 5851 5852 5852 cfs_rq->throttled_clock_self = 0; 5853 5853 5854 - if (SCHED_WARN_ON((s64)delta < 0)) 5854 + if (WARN_ON_ONCE((s64)delta < 0)) 5855 5855 delta = 0; 5856 5856 5857 5857 cfs_rq->throttled_clock_self_time += delta; ··· 5871 5871 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq); 5872 5872 list_del_leaf_cfs_rq(cfs_rq); 5873 5873 5874 - SCHED_WARN_ON(cfs_rq->throttled_clock_self); 5874 + WARN_ON_ONCE(cfs_rq->throttled_clock_self); 5875 5875 if (cfs_rq->nr_queued) 5876 5876 cfs_rq->throttled_clock_self = rq_clock(rq); 5877 5877 } ··· 5980 5980 * throttled-list. rq->lock protects completion. 
5981 5981 */ 5982 5982 cfs_rq->throttled = 1; 5983 - SCHED_WARN_ON(cfs_rq->throttled_clock); 5983 + WARN_ON_ONCE(cfs_rq->throttled_clock); 5984 5984 if (cfs_rq->nr_queued) 5985 5985 cfs_rq->throttled_clock = rq_clock(rq); 5986 5986 return true; ··· 6136 6136 } 6137 6137 6138 6138 /* Already enqueued */ 6139 - if (SCHED_WARN_ON(!list_empty(&cfs_rq->throttled_csd_list))) 6139 + if (WARN_ON_ONCE(!list_empty(&cfs_rq->throttled_csd_list))) 6140 6140 return; 6141 6141 6142 6142 first = list_empty(&rq->cfsb_csd_list); ··· 6155 6155 { 6156 6156 lockdep_assert_rq_held(rq_of(cfs_rq)); 6157 6157 6158 - if (SCHED_WARN_ON(!cfs_rq_throttled(cfs_rq) || 6158 + if (WARN_ON_ONCE(!cfs_rq_throttled(cfs_rq) || 6159 6159 cfs_rq->runtime_remaining <= 0)) 6160 6160 return; 6161 6161 ··· 6191 6191 goto next; 6192 6192 6193 6193 /* By the above checks, this should never be true */ 6194 - SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); 6194 + WARN_ON_ONCE(cfs_rq->runtime_remaining > 0); 6195 6195 6196 6196 raw_spin_lock(&cfs_b->lock); 6197 6197 runtime = -cfs_rq->runtime_remaining + 1; ··· 6212 6212 * We currently only expect to be unthrottling 6213 6213 * a single cfs_rq locally. 6214 6214 */ 6215 - SCHED_WARN_ON(!list_empty(&local_unthrottle)); 6215 + WARN_ON_ONCE(!list_empty(&local_unthrottle)); 6216 6216 list_add_tail(&cfs_rq->throttled_csd_list, 6217 6217 &local_unthrottle); 6218 6218 } ··· 6237 6237 6238 6238 rq_unlock_irqrestore(rq, &rf); 6239 6239 } 6240 - SCHED_WARN_ON(!list_empty(&local_unthrottle)); 6240 + WARN_ON_ONCE(!list_empty(&local_unthrottle)); 6241 6241 6242 6242 rcu_read_unlock(); 6243 6243 ··· 6789 6789 { 6790 6790 struct sched_entity *se = &p->se; 6791 6791 6792 - SCHED_WARN_ON(task_rq(p) != rq); 6792 + WARN_ON_ONCE(task_rq(p) != rq); 6793 6793 6794 6794 if (rq->cfs.h_nr_queued > 1) { 6795 6795 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; ··· 6900 6900 * Because a delayed entity is one that is still on 6901 6901 * the runqueue competing until elegibility. 
6902 6902 */ 6903 - SCHED_WARN_ON(!se->sched_delayed); 6904 - SCHED_WARN_ON(!se->on_rq); 6903 + WARN_ON_ONCE(!se->sched_delayed); 6904 + WARN_ON_ONCE(!se->on_rq); 6905 6905 6906 6906 if (sched_feat(DELAY_ZERO)) { 6907 6907 update_entity_lag(cfs_rq, se); ··· 7161 7161 rq->next_balance = jiffies; 7162 7162 7163 7163 if (p && task_delayed) { 7164 - SCHED_WARN_ON(!task_sleep); 7165 - SCHED_WARN_ON(p->on_rq != 1); 7164 + WARN_ON_ONCE(!task_sleep); 7165 + WARN_ON_ONCE(p->on_rq != 1); 7166 7166 7167 7167 /* Fix-up what dequeue_task_fair() skipped */ 7168 7168 hrtick_update(rq); ··· 8740 8740 static void set_next_buddy(struct sched_entity *se) 8741 8741 { 8742 8742 for_each_sched_entity(se) { 8743 - if (SCHED_WARN_ON(!se->on_rq)) 8743 + if (WARN_ON_ONCE(!se->on_rq)) 8744 8744 return; 8745 8745 if (se_is_idle(se)) 8746 8746 return; ··· 12484 12484 12485 12485 void nohz_balance_exit_idle(struct rq *rq) 12486 12486 { 12487 - SCHED_WARN_ON(rq != this_rq()); 12487 + WARN_ON_ONCE(rq != this_rq()); 12488 12488 12489 12489 if (likely(!rq->nohz_tick_stopped)) 12490 12490 return; ··· 12520 12520 { 12521 12521 struct rq *rq = cpu_rq(cpu); 12522 12522 12523 - SCHED_WARN_ON(cpu != smp_processor_id()); 12523 + WARN_ON_ONCE(cpu != smp_processor_id()); 12524 12524 12525 12525 /* If this CPU is going down, then nothing needs to be done: */ 12526 12526 if (!cpu_active(cpu)) ··· 12603 12603 int balance_cpu; 12604 12604 struct rq *rq; 12605 12605 12606 - SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); 12606 + WARN_ON_ONCE((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); 12607 12607 12608 12608 /* 12609 12609 * We assume there will be no idle load after this update and clear ··· 13043 13043 struct cfs_rq *cfs_rqb; 13044 13044 s64 delta; 13045 13045 13046 - SCHED_WARN_ON(task_rq(b)->core != rq->core); 13046 + WARN_ON_ONCE(task_rq(b)->core != rq->core); 13047 13047 13048 13048 #ifdef CONFIG_FAIR_GROUP_SCHED 13049 13049 /* ··· 13246 13246 13247 13247 static void switched_to_fair(struct rq *rq, struct task_struct *p) 13248 13248 { 13249 - SCHED_WARN_ON(p->se.sched_delayed); 13249 + WARN_ON_ONCE(p->se.sched_delayed); 13250 13250 13251 13251 attach_task_cfs_rq(p); 13252 13252 ··· 13281 13281 if (!first) 13282 13282 return; 13283 13283 13284 - SCHED_WARN_ON(se->sched_delayed); 13284 + WARN_ON_ONCE(se->sched_delayed); 13285 13285 13286 13286 if (hrtick_enabled_fair(rq)) 13287 13287 hrtick_start_fair(rq, p);
+1 -1
kernel/sched/rt.c
··· 1713 1713 BUG_ON(idx >= MAX_RT_PRIO); 1714 1714 1715 1715 queue = array->queue + idx; 1716 - if (SCHED_WARN_ON(list_empty(queue))) 1716 + if (WARN_ON_ONCE(list_empty(queue))) 1717 1717 return NULL; 1718 1718 next = list_entry(queue->next, struct sched_rt_entity, run_list); 1719 1719
+5 -11
kernel/sched/sched.h
··· 91 91 #include "cpupri.h" 92 92 #include "cpudeadline.h" 93 93 94 - #ifdef CONFIG_SCHED_DEBUG 95 - # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) 96 - #else 97 - # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) 98 - #endif 99 - 100 94 /* task_struct::on_rq states: */ 101 95 #define TASK_ON_RQ_QUEUED 1 102 96 #define TASK_ON_RQ_MIGRATING 2 ··· 1565 1571 1566 1572 static inline struct task_struct *task_of(struct sched_entity *se) 1567 1573 { 1568 - SCHED_WARN_ON(!entity_is_task(se)); 1574 + WARN_ON_ONCE(!entity_is_task(se)); 1569 1575 return container_of(se, struct task_struct, se); 1570 1576 } 1571 1577 ··· 1646 1652 * The only reason for not seeing a clock update since the 1647 1653 * last rq_pin_lock() is if we're currently skipping updates. 1648 1654 */ 1649 - SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1655 + WARN_ON_ONCE(rq->clock_update_flags < RQCF_ACT_SKIP); 1650 1656 } 1651 1657 1652 1658 static inline u64 rq_clock(struct rq *rq) ··· 1693 1699 static inline void rq_clock_start_loop_update(struct rq *rq) 1694 1700 { 1695 1701 lockdep_assert_rq_held(rq); 1696 - SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP); 1702 + WARN_ON_ONCE(rq->clock_update_flags & RQCF_ACT_SKIP); 1697 1703 rq->clock_update_flags |= RQCF_ACT_SKIP; 1698 1704 } 1699 1705 ··· 1768 1774 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1769 1775 rf->clock_update_flags = 0; 1770 1776 # ifdef CONFIG_SMP 1771 - SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); 1777 + WARN_ON_ONCE(rq->balance_callback && rq->balance_callback != &balance_push_callback); 1772 1778 # endif 1773 1779 #endif 1774 1780 } ··· 2679 2685 2680 2686 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2681 2687 { 2682 - SCHED_WARN_ON(!rcu_read_lock_held()); 2688 + WARN_ON_ONCE(!rcu_read_lock_held()); 2683 2689 2684 2690 return rq->idle_state; 2685 2691 }
+1 -1
kernel/sched/stats.h
··· 144 144 145 145 if (p->se.sched_delayed) { 146 146 /* CPU migration of "sleeping" task */ 147 - SCHED_WARN_ON(!(flags & ENQUEUE_MIGRATED)); 147 + WARN_ON_ONCE(!(flags & ENQUEUE_MIGRATED)); 148 148 if (p->in_memstall) 149 149 set |= TSK_MEMSTALL; 150 150 if (p->in_iowait)