Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

tools/sched_ext: Strip compatibility macros for cgroup and dispatch APIs

Enough time has passed since the introduction of scx_bpf_task_cgroup() and
the scx_bpf_dispatch* -> scx_bpf_dsq* kfunc renaming. Strip the compatibility
macros.

Acked-by: Changwoo Min <changwoo@igalia.com>
Acked-by: Andrea Righi <arighi@nvidia.com>
Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

Author: Tejun Heo — commit 111a7980 (parent 0128c850)

Diffstat (total): +12 -120

tools/sched_ext/include/scx/compat.bpf.h (+2 -106)
···
 15  15		__ret; \
 16  16	})
 17  17
 18     -	/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
 19     -	#define __COMPAT_scx_bpf_task_cgroup(p) \
 20     -		(bpf_ksym_exists(scx_bpf_task_cgroup) ? \
 21     -		 scx_bpf_task_cgroup((p)) : NULL)
 22     -
 23  18	/*
 24     -	 * v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are
 25     -	 * renamed to unload the verb.
     19 +	 * v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits")
 26  20	 *
 27     -	 * Build error is triggered if old names are used. New binaries work with both
 28     -	 * new and old names. The compat macros will be removed on v6.15 release.
 29     -	 *
 30     -	 * scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
 31     -	 * 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
 32     -	 * Preserve __COMPAT macros until v6.15.
     21 +	 * Compat macro will be dropped on v6.19 release.
 33  22	 */
 34     -	void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
 35     -	void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
 36     -	bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
 37     -	void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
 38     -	void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
 39     -	bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
 40     -	bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
 41  23	int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;
 42     -
 43     -	#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags) \
 44     -		(bpf_ksym_exists(scx_bpf_dsq_insert) ? \
 45     -		 scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) : \
 46     -		 scx_bpf_dispatch___compat((p), (dsq_id), (slice), (enq_flags)))
 47     -
 48     -	#define scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags) \
 49     -		(bpf_ksym_exists(scx_bpf_dsq_insert_vtime) ? \
 50     -		 scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \
 51     -		 scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags)))
 52     -
 53     -	#define scx_bpf_dsq_move_to_local(dsq_id) \
 54     -		(bpf_ksym_exists(scx_bpf_dsq_move_to_local) ? \
 55     -		 scx_bpf_dsq_move_to_local((dsq_id)) : \
 56     -		 scx_bpf_consume___compat((dsq_id)))
 57     -
 58     -	#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice) \
 59     -		(bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ? \
 60     -		 scx_bpf_dsq_move_set_slice((it__iter), (slice)) : \
 61     -		 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ? \
 62     -		  scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) : \
 63     -		  (void)0))
 64     -
 65     -	#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime) \
 66     -		(bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ? \
 67     -		 scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) : \
 68     -		 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ? \
 69     -		  scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) : \
 70     -		  (void) 0))
 71     -
 72     -	#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \
 73     -		(bpf_ksym_exists(scx_bpf_dsq_move) ? \
 74     -		 scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) : \
 75     -		 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ? \
 76     -		  scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
 77     -		  false))
 78     -
 79     -	#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \
 80     -		(bpf_ksym_exists(scx_bpf_dsq_move_vtime) ? \
 81     -		 scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) : \
 82     -		 (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ? \
 83     -		  scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
 84     -		  false))
 85  24
 86  25	#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz) \
 87  26		(bpf_ksym_exists(bpf_cpumask_populate) ? \
 88  27		 (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)
 89     -
 90     -	#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags) \
 91     -		_Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")
 92     -
 93     -	#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags) \
 94     -		_Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()")
 95     -
 96     -	#define scx_bpf_consume(dsq_id) ({ \
 97     -		_Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \
 98     -		false; \
 99     -	})
100     -
101     -	#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
102     -		_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()")
103     -
104     -	#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
105     -		_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()")
106     -
107     -	#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
108     -		_Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \
109     -		false; \
110     -	})
111     -
112     -	#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
113     -		_Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \
114     -		false; \
115     -	})
116     -
117     -	#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
118     -		_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()")
119     -
120     -	#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
121     -		_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()")
122     -
123     -	#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
124     -		_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move()"); \
125     -		false; \
126     -	})
127     -
128     -	#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
129     -		_Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \
130     -		false; \
131     -	})
132  28
133  29	/**
134  30	 * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
tools/sched_ext/scx_flatcg.bpf.c (+5 -5)
···
382 382		return;
383 383	}
384 384
385     -	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
    385 +	cgrp = scx_bpf_task_cgroup(p);
386 386	cgc = find_cgrp_ctx(cgrp);
387 387	if (!cgc)
388 388		goto out_release;
···
508 508	{
509 509		struct cgroup *cgrp;
510 510
511     -	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
    511 +	cgrp = scx_bpf_task_cgroup(p);
512 512	update_active_weight_sums(cgrp, true);
513 513	bpf_cgroup_release(cgrp);
514 514	}
···
521 521	if (fifo_sched)
522 522		return;
523 523
524     -	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
    524 +	cgrp = scx_bpf_task_cgroup(p);
525 525	cgc = find_cgrp_ctx(cgrp);
526 526	if (cgc) {
527 527		/*
···
564 564	if (!taskc->bypassed_at)
565 565		return;
566 566
567     -	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
    567 +	cgrp = scx_bpf_task_cgroup(p);
568 568	cgc = find_cgrp_ctx(cgrp);
569 569	if (cgc) {
570 570		__sync_fetch_and_add(&cgc->cvtime_delta,
···
578 578	{
579 579		struct cgroup *cgrp;
580 580
581     -	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
    581 +	cgrp = scx_bpf_task_cgroup(p);
582 582	update_active_weight_sums(cgrp, false);
583 583	bpf_cgroup_release(cgrp);
584 584	}
tools/sched_ext/scx_qmap.bpf.c (+5 -9)
···
320 320
321 321	if (tctx->highpri) {
322 322		/* exercise the set_*() and vtime interface too */
323     -		__COMPAT_scx_bpf_dsq_move_set_slice(
324     -			BPF_FOR_EACH_ITER, slice_ns * 2);
325     -		__COMPAT_scx_bpf_dsq_move_set_vtime(
326     -			BPF_FOR_EACH_ITER, highpri_seq++);
327     -		__COMPAT_scx_bpf_dsq_move_vtime(
328     -			BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
    323 +		scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, slice_ns * 2);
    324 +		scx_bpf_dsq_move_set_vtime(BPF_FOR_EACH_ITER, highpri_seq++);
    325 +		scx_bpf_dsq_move_vtime(BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
329 326	}
330 327	}
331 328
···
339 342	else
340 343		cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
341 344
342     -	if (__COMPAT_scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
343     -				      SCX_DSQ_LOCAL_ON | cpu,
344     -				      SCX_ENQ_PREEMPT)) {
    345 +	if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL_ON | cpu,
    346 +			     SCX_ENQ_PREEMPT)) {
345 347		if (cpu == this_cpu) {
346 348			dispatched = true;
347 349			__sync_fetch_and_add(&nr_expedited_local, 1);