Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched_ext: Drop "ops" from scx_ops_bypass(), scx_ops_breather() and friends

The tag "ops" is used for two different purposes. First, to indicate that
the entity is directly related to the operations such as flags carried in
sched_ext_ops. Second, to indicate that the entity applies to something
global such as enable or bypass states. The second usage is historical and
causes confusion rather than clarifying anything. For example,
scx_ops_enable_state enums are named SCX_OPS_* and thus conflict with
scx_ops_flags. Let's drop the second usage.

Drop "ops" from scx_ops_bypass(), scx_ops_breather() and friends. Update
scx_show_state.py accordingly.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Andrea Righi <arighi@nvidia.com>

+36 -37
+34 -35
kernel/sched/ext.c
··· 913 913 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem); 914 914 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED); 915 915 static unsigned long scx_in_softlockup; 916 - static atomic_t scx_ops_breather_depth = ATOMIC_INIT(0); 917 - static int scx_ops_bypass_depth; 916 + static atomic_t scx_breather_depth = ATOMIC_INIT(0); 917 + static int scx_bypass_depth; 918 918 static bool scx_ops_init_task_enabled; 919 919 static bool scx_switching_all; 920 920 DEFINE_STATIC_KEY_FALSE(__scx_switched_all); ··· 2223 2223 } 2224 2224 2225 2225 /* 2226 - * list_add_tail() must be used. scx_ops_bypass() depends on tasks being 2226 + * list_add_tail() must be used. scx_bypass() depends on tasks being 2227 2227 * appended to the runnable_list. 2228 2228 */ 2229 2229 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list); ··· 2657 2657 * to the bypass mode can take a long time. Inject artificial delays while the 2658 2658 * bypass mode is switching to guarantee timely completion. 2659 2659 */ 2660 - static void scx_ops_breather(struct rq *rq) 2660 + static void scx_breather(struct rq *rq) 2661 2661 { 2662 2662 u64 until; 2663 2663 2664 2664 lockdep_assert_rq_held(rq); 2665 2665 2666 - if (likely(!atomic_read(&scx_ops_breather_depth))) 2666 + if (likely(!atomic_read(&scx_breather_depth))) 2667 2667 return; 2668 2668 2669 2669 raw_spin_rq_unlock(rq); ··· 2672 2672 2673 2673 do { 2674 2674 int cnt = 1024; 2675 - while (atomic_read(&scx_ops_breather_depth) && --cnt) 2675 + while (atomic_read(&scx_breather_depth) && --cnt) 2676 2676 cpu_relax(); 2677 - } while (atomic_read(&scx_ops_breather_depth) && 2677 + } while (atomic_read(&scx_breather_depth) && 2678 2678 time_before64(ktime_get_ns(), until)); 2679 2679 2680 2680 raw_spin_rq_lock(rq); ··· 2685 2685 struct task_struct *p; 2686 2686 retry: 2687 2687 /* 2688 - * This retry loop can repeatedly race against scx_ops_bypass() 2689 - * dequeueing tasks from @dsq trying to put the system into the bypass 2690 - * mode. 
On some multi-socket machines (e.g. 2x Intel 8480c), this can 2691 - * live-lock the machine into soft lockups. Give a breather. 2688 + * This retry loop can repeatedly race against scx_bypass() dequeueing 2689 + * tasks from @dsq trying to put the system into the bypass mode. On 2690 + * some multi-socket machines (e.g. 2x Intel 8480c), this can live-lock 2691 + * the machine into soft lockups. Give a breather. 2692 2692 */ 2693 - scx_ops_breather(rq); 2693 + scx_breather(rq); 2694 2694 2695 2695 /* 2696 2696 * The caller can't expect to successfully consume a task if the task's ··· 4473 4473 return; 4474 4474 } 4475 4475 4476 - /* allow only one instance, cleared at the end of scx_ops_bypass() */ 4476 + /* allow only one instance, cleared at the end of scx_bypass() */ 4477 4477 if (test_and_set_bit(0, &scx_in_softlockup)) 4478 4478 return; 4479 4479 ··· 4482 4482 4483 4483 /* 4484 4484 * Some CPUs may be trapped in the dispatch paths. Enable breather 4485 - * immediately; otherwise, we might even be able to get to 4486 - * scx_ops_bypass(). 4485 + * immediately; otherwise, we might even be able to get to scx_bypass(). 4487 4486 */ 4488 - atomic_inc(&scx_ops_breather_depth); 4487 + atomic_inc(&scx_breather_depth); 4489 4488 4490 4489 scx_ops_error("soft lockup - CPU#%d stuck for %us", 4491 4490 smp_processor_id(), dur_s); ··· 4493 4494 static void scx_clear_softlockup(void) 4494 4495 { 4495 4496 if (test_and_clear_bit(0, &scx_in_softlockup)) 4496 - atomic_dec(&scx_ops_breather_depth); 4497 + atomic_dec(&scx_breather_depth); 4497 4498 } 4498 4499 4499 4500 /** 4500 - * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress 4501 + * scx_bypass - [Un]bypass scx_ops and guarantee forward progress 4501 4502 * @bypass: true for bypass, false for unbypass 4502 4503 * 4503 4504 * Bypassing guarantees that all runnable tasks make forward progress without ··· 4527 4528 * 4528 4529 * - scx_prio_less() reverts to the default core_sched_at order. 
4529 4530 */ 4530 - static void scx_ops_bypass(bool bypass) 4531 + static void scx_bypass(bool bypass) 4531 4532 { 4532 4533 static DEFINE_RAW_SPINLOCK(bypass_lock); 4533 4534 static unsigned long bypass_timestamp; ··· 4537 4538 4538 4539 raw_spin_lock_irqsave(&bypass_lock, flags); 4539 4540 if (bypass) { 4540 - scx_ops_bypass_depth++; 4541 - WARN_ON_ONCE(scx_ops_bypass_depth <= 0); 4542 - if (scx_ops_bypass_depth != 1) 4541 + scx_bypass_depth++; 4542 + WARN_ON_ONCE(scx_bypass_depth <= 0); 4543 + if (scx_bypass_depth != 1) 4543 4544 goto unlock; 4544 4545 bypass_timestamp = ktime_get_ns(); 4545 4546 scx_add_event(SCX_EV_BYPASS_ACTIVATE, 1); 4546 4547 } else { 4547 - scx_ops_bypass_depth--; 4548 - WARN_ON_ONCE(scx_ops_bypass_depth < 0); 4549 - if (scx_ops_bypass_depth != 0) 4548 + scx_bypass_depth--; 4549 + WARN_ON_ONCE(scx_bypass_depth < 0); 4550 + if (scx_bypass_depth != 0) 4550 4551 goto unlock; 4551 4552 scx_add_event(SCX_EV_BYPASS_DURATION, 4552 4553 ktime_get_ns() - bypass_timestamp); 4553 4554 } 4554 4555 4555 - atomic_inc(&scx_ops_breather_depth); 4556 + atomic_inc(&scx_breather_depth); 4556 4557 4557 4558 /* 4558 4559 * No task property is changing. 
We just need to make sure all currently ··· 4610 4611 raw_spin_rq_unlock(rq); 4611 4612 } 4612 4613 4613 - atomic_dec(&scx_ops_breather_depth); 4614 + atomic_dec(&scx_breather_depth); 4614 4615 unlock: 4615 4616 raw_spin_unlock_irqrestore(&bypass_lock, flags); 4616 4617 scx_clear_softlockup(); ··· 4691 4692 ei->reason = scx_exit_reason(ei->kind); 4692 4693 4693 4694 /* guarantee forward progress by bypassing scx_ops */ 4694 - scx_ops_bypass(true); 4695 + scx_bypass(true); 4695 4696 4696 4697 switch (scx_set_enable_state(SCX_DISABLING)) { 4697 4698 case SCX_DISABLING: ··· 4829 4830 4830 4831 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING); 4831 4832 done: 4832 - scx_ops_bypass(false); 4833 + scx_bypass(false); 4833 4834 } 4834 4835 4835 4836 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn); ··· 5403 5404 * scheduling) may not function correctly before all tasks are switched. 5404 5405 * Init in bypass mode to guarantee forward progress. 5405 5406 */ 5406 - scx_ops_bypass(true); 5407 + scx_bypass(true); 5407 5408 5408 5409 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++) 5409 5410 if (((void (**)(void))ops)[i]) ··· 5514 5515 scx_task_iter_stop(&sti); 5515 5516 percpu_up_write(&scx_fork_rwsem); 5516 5517 5517 - scx_ops_bypass(false); 5518 + scx_bypass(false); 5518 5519 5519 5520 if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) { 5520 5521 WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE); ··· 5549 5550 err_disable_unlock_all: 5550 5551 scx_cgroup_unlock(); 5551 5552 percpu_up_write(&scx_fork_rwsem); 5552 - scx_ops_bypass(false); 5553 + scx_bypass(false); 5553 5554 err_disable: 5554 5555 mutex_unlock(&scx_enable_mutex); 5555 5556 /* ··· 6028 6029 case PM_HIBERNATION_PREPARE: 6029 6030 case PM_SUSPEND_PREPARE: 6030 6031 case PM_RESTORE_PREPARE: 6031 - scx_ops_bypass(true); 6032 + scx_bypass(true); 6032 6033 break; 6033 6034 case PM_POST_HIBERNATION: 6034 6035 case PM_POST_SUSPEND: 6035 6036 case 
PM_POST_RESTORE: 6036 - scx_ops_bypass(false); 6037 + scx_bypass(false); 6037 6038 break; 6038 6039 } 6039 6040 ··· 6291 6292 * cause similar live-lock conditions as consume_dispatch_q(). Insert a 6292 6293 * breather if necessary. 6293 6294 */ 6294 - scx_ops_breather(src_rq); 6295 + scx_breather(src_rq); 6295 6296 6296 6297 locked_rq = src_rq; 6297 6298 raw_spin_lock(&src_dsq->lock);
+2 -2
tools/sched_ext/scx_show_state.py
··· 36 36 print(f'switched_all : {read_static_key("__scx_switched_all")}') 37 37 print(f'enable_state : {state_str(enable_state)} ({enable_state})') 38 38 print(f'in_softlockup : {prog["scx_in_softlockup"].value_()}') 39 - print(f'breather_depth: {read_atomic("scx_ops_breather_depth")}') 40 - print(f'bypass_depth : {prog["scx_ops_bypass_depth"].value_()}') 39 + print(f'breather_depth: {read_atomic("scx_breather_depth")}') 40 + print(f'bypass_depth : {prog["scx_bypass_depth"].value_()}') 41 41 print(f'nr_rejected : {read_atomic("scx_nr_rejected")}') 42 42 print(f'enable_seq : {read_atomic("scx_enable_seq")}')