Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tools/sched_ext: Receive updates from SCX repo

Receive tools/sched_ext updates from https://github.com/sched-ext/scx to
sync userspace bits:

- basic BPF arena allocator abstractions,

- additional process flags definitions,

- fixed is_migration_disabled() helper,

- separate out user_exit_info BPF and user space code.

This also fixes the following warning when building the selftests:

tools/sched_ext/include/scx/common.bpf.h:550:9: warning: 'likely' macro redefined [-Wmacro-redefined]
550 | #define likely(x) __builtin_expect(!!(x), 1)
| ^

Co-developed-by: Cheng-Yang Chou <yphbchou0911@gmail.com>
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

authored by

Andrea Righi and committed by
Tejun Heo
de68c051 8f5ae30d

+388 -60
+175
tools/sched_ext/include/scx/bpf_arena_common.bpf.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 + /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ 3 + #pragma once 4 + 5 + #ifndef PAGE_SIZE 6 + #define PAGE_SIZE __PAGE_SIZE 7 + /* 8 + * for older kernels try sizeof(struct genradix_node) 9 + * or flexible: 10 + * static inline long __bpf_page_size(void) { 11 + * return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node); 12 + * } 13 + * but generated code is not great. 14 + */ 15 + #endif 16 + 17 + #if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM) 18 + #define __arena __attribute__((address_space(1))) 19 + #define __arena_global __attribute__((address_space(1))) 20 + #define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */ 21 + #define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */ 22 + #else 23 + 24 + /* emit instruction: 25 + * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as 26 + * 27 + * This is a workaround for LLVM compiler versions without 28 + * __BPF_FEATURE_ADDR_SPACE_CAST that do not automatically cast between arena 29 + * pointers and native kernel/userspace ones. In this case we explicitly do so 30 + * with cast_kern() and cast_user(). E.g., in the Linux kernel tree, 31 + * tools/testing/selftests/bpf includes tests that use these macros to implement 32 + * linked lists and hashtables backed by arena memory. In sched_ext, we use 33 + * cast_kern() and cast_user() for compatibility with older LLVM toolchains. 
34 + */ 35 + #ifndef bpf_addr_space_cast 36 + #define bpf_addr_space_cast(var, dst_as, src_as)\ 37 + asm volatile(".byte 0xBF; \ 38 + .ifc %[reg], r0; \ 39 + .byte 0x00; \ 40 + .endif; \ 41 + .ifc %[reg], r1; \ 42 + .byte 0x11; \ 43 + .endif; \ 44 + .ifc %[reg], r2; \ 45 + .byte 0x22; \ 46 + .endif; \ 47 + .ifc %[reg], r3; \ 48 + .byte 0x33; \ 49 + .endif; \ 50 + .ifc %[reg], r4; \ 51 + .byte 0x44; \ 52 + .endif; \ 53 + .ifc %[reg], r5; \ 54 + .byte 0x55; \ 55 + .endif; \ 56 + .ifc %[reg], r6; \ 57 + .byte 0x66; \ 58 + .endif; \ 59 + .ifc %[reg], r7; \ 60 + .byte 0x77; \ 61 + .endif; \ 62 + .ifc %[reg], r8; \ 63 + .byte 0x88; \ 64 + .endif; \ 65 + .ifc %[reg], r9; \ 66 + .byte 0x99; \ 67 + .endif; \ 68 + .short %[off]; \ 69 + .long %[as]" \ 70 + : [reg]"+r"(var) \ 71 + : [off]"i"(BPF_ADDR_SPACE_CAST) \ 72 + , [as]"i"((dst_as << 16) | src_as)); 73 + #endif 74 + 75 + #define __arena 76 + #define __arena_global SEC(".addr_space.1") 77 + #define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1) 78 + #define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0) 79 + #endif 80 + 81 + void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt, 82 + int node_id, __u64 flags) __ksym __weak; 83 + void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak; 84 + 85 + /* 86 + * Note that cond_break can only be portably used in the body of a breakable 87 + * construct, whereas can_loop can be used anywhere. 
88 + */ 89 + #ifdef TEST 90 + #define can_loop true 91 + #define __cond_break(expr) expr 92 + #else 93 + #ifdef __BPF_FEATURE_MAY_GOTO 94 + #define can_loop \ 95 + ({ __label__ l_break, l_continue; \ 96 + bool ret = true; \ 97 + asm volatile goto("may_goto %l[l_break]" \ 98 + :::: l_break); \ 99 + goto l_continue; \ 100 + l_break: ret = false; \ 101 + l_continue:; \ 102 + ret; \ 103 + }) 104 + 105 + #define __cond_break(expr) \ 106 + ({ __label__ l_break, l_continue; \ 107 + asm volatile goto("may_goto %l[l_break]" \ 108 + :::: l_break); \ 109 + goto l_continue; \ 110 + l_break: expr; \ 111 + l_continue:; \ 112 + }) 113 + #else 114 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 115 + #define can_loop \ 116 + ({ __label__ l_break, l_continue; \ 117 + bool ret = true; \ 118 + asm volatile goto("1:.byte 0xe5; \ 119 + .byte 0; \ 120 + .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \ 121 + .short 0" \ 122 + :::: l_break); \ 123 + goto l_continue; \ 124 + l_break: ret = false; \ 125 + l_continue:; \ 126 + ret; \ 127 + }) 128 + 129 + #define __cond_break(expr) \ 130 + ({ __label__ l_break, l_continue; \ 131 + asm volatile goto("1:.byte 0xe5; \ 132 + .byte 0; \ 133 + .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \ 134 + .short 0" \ 135 + :::: l_break); \ 136 + goto l_continue; \ 137 + l_break: expr; \ 138 + l_continue:; \ 139 + }) 140 + #else 141 + #define can_loop \ 142 + ({ __label__ l_break, l_continue; \ 143 + bool ret = true; \ 144 + asm volatile goto("1:.byte 0xe5; \ 145 + .byte 0; \ 146 + .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ 147 + .short 0" \ 148 + :::: l_break); \ 149 + goto l_continue; \ 150 + l_break: ret = false; \ 151 + l_continue:; \ 152 + ret; \ 153 + }) 154 + 155 + #define __cond_break(expr) \ 156 + ({ __label__ l_break, l_continue; \ 157 + asm volatile goto("1:.byte 0xe5; \ 158 + .byte 0; \ 159 + .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ 160 + .short 0" \ 161 + :::: l_break); \ 162 + goto l_continue; \ 163 + l_break: expr; \ 164 
+ l_continue:; \ 165 + }) 166 + #endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */ 167 + #endif /* __BPF_FEATURE_MAY_GOTO */ 168 + #endif /* TEST */ 169 + 170 + #define cond_break __cond_break(break) 171 + #define cond_break_label(label) __cond_break(goto label) 172 + 173 + 174 + void bpf_preempt_disable(void) __weak __ksym; 175 + void bpf_preempt_enable(void) __weak __ksym;
+33
tools/sched_ext/include/scx/bpf_arena_common.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 + /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ 3 + #pragma once 4 + 5 + #ifndef arena_container_of 6 + #define arena_container_of(ptr, type, member) \ 7 + ({ \ 8 + void __arena *__mptr = (void __arena *)(ptr); \ 9 + ((type *)(__mptr - offsetof(type, member))); \ 10 + }) 11 + #endif 12 + 13 + /* Provide the definition of PAGE_SIZE. */ 14 + #include <sys/user.h> 15 + 16 + #define __arena 17 + #define __arg_arena 18 + #define cast_kern(ptr) /* nop for user space */ 19 + #define cast_user(ptr) /* nop for user space */ 20 + char __attribute__((weak)) arena[1]; 21 + 22 + #ifndef offsetof 23 + #define offsetof(type, member) ((unsigned long)&((type *)0)->member) 24 + #endif 25 + 26 + static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt, 27 + int node_id, __u64 flags) 28 + { 29 + return NULL; 30 + } 31 + static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) 32 + { 33 + }
+93 -9
tools/sched_ext/include/scx/common.bpf.h
··· 24 24 #include <bpf/bpf_helpers.h> 25 25 #include <bpf/bpf_tracing.h> 26 26 #include <asm-generic/errno.h> 27 - #include "user_exit_info.h" 27 + #include "user_exit_info.bpf.h" 28 28 #include "enum_defs.autogen.h" 29 29 30 + #define PF_IDLE 0x00000002 /* I am an IDLE thread */ 31 + #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ 30 32 #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 33 + #define PF_KCOMPACTD 0x00010000 /* I am kcompactd */ 34 + #define PF_KSWAPD 0x00020000 /* I am kswapd */ 31 35 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 32 36 #define PF_EXITING 0x00000004 33 37 #define CLOCK_MONOTONIC 1 38 + 39 + #ifndef NR_CPUS 40 + #define NR_CPUS 1024 41 + #endif 42 + 43 + #ifndef NUMA_NO_NODE 44 + #define NUMA_NO_NODE (-1) 45 + #endif 34 46 35 47 extern int LINUX_KERNEL_VERSION __kconfig; 36 48 extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak; ··· 119 107 static inline __attribute__((format(printf, 1, 2))) 120 108 void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {} 121 109 110 + #define SCX_STRINGIFY(x) #x 111 + #define SCX_TOSTRING(x) SCX_STRINGIFY(x) 112 + 122 113 /* 123 114 * Helper macro for initializing the fmt and variadic argument inputs to both 124 115 * bstr exit kfuncs. Callers to this function should use ___fmt and ___param to ··· 156 141 * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments 157 142 * instead of an array of u64. Invoking this macro will cause the scheduler to 158 143 * exit in an erroneous state, with diagnostic information being passed to the 159 - * user. 144 + * user. It appends the file and line number to aid debugging. 160 145 */ 161 146 #define scx_bpf_error(fmt, args...) 
\ 162 147 ({ \ 163 - scx_bpf_bstr_preamble(fmt, args) \ 148 + scx_bpf_bstr_preamble( \ 149 + __FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args) \ 164 150 scx_bpf_error_bstr(___fmt, ___param, sizeof(___param)); \ 165 - ___scx_bpf_bstr_format_checker(fmt, ##args); \ 151 + ___scx_bpf_bstr_format_checker( \ 152 + __FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args); \ 166 153 }) 167 154 168 155 /* ··· 246 229 * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of 247 230 * `MEMBER_VPTR(ptr, ->member)`. 248 231 */ 232 + #ifndef MEMBER_VPTR 249 233 #define MEMBER_VPTR(base, member) (typeof((base) member) *) \ 250 234 ({ \ 251 235 u64 __base = (u64)&(base); \ ··· 263 245 [max]"i"(sizeof(base) - sizeof((base) member))); \ 264 246 __addr; \ 265 247 }) 248 + #endif /* MEMBER_VPTR */ 266 249 267 250 /** 268 251 * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element ··· 279 260 * size of the array to compute the max, which will result in rejection by 280 261 * the verifier. 281 262 */ 263 + #ifndef ARRAY_ELEM_PTR 282 264 #define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *) \ 283 265 ({ \ 284 266 u64 __base = (u64)arr; \ ··· 294 274 [max]"r"(sizeof(arr[0]) * ((n) - 1))); \ 295 275 __addr; \ 296 276 }) 297 - 277 + #endif /* ARRAY_ELEM_PTR */ 298 278 299 279 /* 300 280 * BPF declarations and helpers ··· 458 438 */ 459 439 static inline bool is_migration_disabled(const struct task_struct *p) 460 440 { 461 - if (bpf_core_field_exists(p->migration_disabled)) 462 - return p->migration_disabled; 441 + /* 442 + * Testing p->migration_disabled in a BPF code is tricky because the 443 + * migration is _always_ disabled while running the BPF code. 444 + * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) for BPF 445 + * code execution disable and re-enable the migration of the current 446 + * task, respectively. So, the _current_ task of the sched_ext ops is 447 + * always migration-disabled. 
Moreover, p->migration_disabled could be 448 + * two or greater when a sched_ext ops BPF code (e.g., ops.tick) is 449 + * executed in the middle of the other BPF code execution. 450 + * 451 + * Therefore, we should decide that the _current_ task is 452 + * migration-disabled only when its migration_disabled count is greater 453 + * than one. In other words, when p->migration_disabled == 1, there is 454 + * an ambiguity, so we should check if @p is the current task or not. 455 + */ 456 + if (bpf_core_field_exists(p->migration_disabled)) { 457 + if (p->migration_disabled == 1) 458 + return bpf_get_current_task_btf() != p; 459 + else 460 + return p->migration_disabled; 461 + } 463 462 return false; 464 463 } 465 464 ··· 515 476 */ 516 477 static inline bool time_after(u64 a, u64 b) 517 478 { 518 - return (s64)(b - a) < 0; 479 + return (s64)(b - a) < 0; 519 480 } 520 481 521 482 /** ··· 539 500 */ 540 501 static inline bool time_after_eq(u64 a, u64 b) 541 502 { 542 - return (s64)(a - b) >= 0; 503 + return (s64)(a - b) >= 0; 543 504 } 544 505 545 506 /** ··· 586 547 */ 587 548 588 549 /* useful compiler attributes */ 550 + #ifndef likely 589 551 #define likely(x) __builtin_expect(!!(x), 1) 552 + #endif 553 + #ifndef unlikely 590 554 #define unlikely(x) __builtin_expect(!!(x), 0) 555 + #endif 556 + #ifndef __maybe_unused 591 557 #define __maybe_unused __attribute__((__unused__)) 558 + #endif 592 559 593 560 /* 594 561 * READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They ··· 678 633 }) 679 634 680 635 /* 636 + * __calc_avg - Calculate exponential weighted moving average (EWMA) with 637 + * @old and @new values. @decay represents how large the @old value remains. 638 + * With a larger @decay value, the moving average changes slowly, exhibiting 639 + * fewer fluctuations. 
640 + */ 641 + #define __calc_avg(old, new, decay) ({ \ 642 + typeof(decay) thr = 1 << (decay); \ 643 + typeof(old) ret; \ 644 + if (((old) < thr) || ((new) < thr)) { \ 645 + if (((old) == 1) && ((new) == 0)) \ 646 + ret = 0; \ 647 + else \ 648 + ret = ((old) - ((old) >> 1)) + ((new) >> 1); \ 649 + } else { \ 650 + ret = ((old) - ((old) >> (decay))) + ((new) >> (decay)); \ 651 + } \ 652 + ret; \ 653 + }) 654 + 655 + /* 681 656 * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value. 682 657 * @v: The value for which we're computing the base 2 logarithm. 683 658 */ ··· 725 660 return log2_u32(hi) + 32 + 1; 726 661 else 727 662 return log2_u32(v) + 1; 663 + } 664 + 665 + /* 666 + * sqrt_u64 - Calculate the square root of value @x using Newton's method. 667 + */ 668 + static inline u64 __sqrt_u64(u64 x) 669 + { 670 + if (x == 0 || x == 1) 671 + return x; 672 + 673 + u64 r = ((1ULL << 32) > x) ? x : (1ULL << 32); 674 + 675 + for (int i = 0; i < 8; ++i) { 676 + u64 q = x / r; 677 + if (r <= q) 678 + break; 679 + r = (r + q) >> 1; 680 + } 681 + return r; 728 682 } 729 683 730 684 /*
+3 -2
tools/sched_ext/include/scx/common.h
··· 75 75 #include "enums.h" 76 76 77 77 /* not available when building kernel tools/sched_ext */ 78 - #if __has_include(<lib/sdt_task.h>) 79 - #include <lib/sdt_task.h> 78 + #if __has_include(<lib/sdt_task_defs.h>) 79 + #include "bpf_arena_common.h" 80 + #include <lib/sdt_task_defs.h> 80 81 #endif 81 82 82 83 #endif /* __SCHED_EXT_COMMON_H */
+5
tools/sched_ext/include/scx/compat.bpf.h
··· 38 38 void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak; 39 39 bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; 40 40 bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; 41 + int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak; 41 42 42 43 #define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags) \ 43 44 (bpf_ksym_exists(scx_bpf_dsq_insert) ? \ ··· 82 81 (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ? \ 83 82 scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \ 84 83 false)) 84 + 85 + #define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz) \ 86 + (bpf_ksym_exists(bpf_cpumask_populate) ? \ 87 + (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP) 85 88 86 89 #define scx_bpf_dispatch(p, dsq_id, slice, enq_flags) \ 87 90 _Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")
+40
tools/sched_ext/include/scx/user_exit_info.bpf.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Define struct user_exit_info which is shared between BPF and userspace parts 4 + * to communicate exit status and other information. 5 + * 6 + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 7 + * Copyright (c) 2022 Tejun Heo <tj@kernel.org> 8 + * Copyright (c) 2022 David Vernet <dvernet@meta.com> 9 + */ 10 + 11 + #ifndef __USER_EXIT_INFO_BPF_H 12 + #define __USER_EXIT_INFO_BPF_H 13 + 14 + #ifndef LSP 15 + #include "vmlinux.h" 16 + #endif 17 + #include <bpf/bpf_core_read.h> 18 + 19 + #include "user_exit_info_common.h" 20 + 21 + #define UEI_DEFINE(__name) \ 22 + char RESIZABLE_ARRAY(data, __name##_dump); \ 23 + const volatile u32 __name##_dump_len; \ 24 + struct user_exit_info __name SEC(".data") 25 + 26 + #define UEI_RECORD(__uei_name, __ei) ({ \ 27 + bpf_probe_read_kernel_str(__uei_name.reason, \ 28 + sizeof(__uei_name.reason), (__ei)->reason); \ 29 + bpf_probe_read_kernel_str(__uei_name.msg, \ 30 + sizeof(__uei_name.msg), (__ei)->msg); \ 31 + bpf_probe_read_kernel_str(__uei_name##_dump, \ 32 + __uei_name##_dump_len, (__ei)->dump); \ 33 + if (bpf_core_field_exists((__ei)->exit_code)) \ 34 + __uei_name.exit_code = (__ei)->exit_code; \ 35 + /* use __sync to force memory barrier */ \ 36 + __sync_val_compare_and_swap(&__uei_name.kind, __uei_name.kind, \ 37 + (__ei)->kind); \ 38 + }) 39 + 40 + #endif /* __USER_EXIT_INFO_BPF_H */
+2 -47
tools/sched_ext/include/scx/user_exit_info.h
··· 10 10 #ifndef __USER_EXIT_INFO_H 11 11 #define __USER_EXIT_INFO_H 12 12 13 - #ifdef LSP 14 - #define __bpf__ 15 - #include "../vmlinux.h" 16 - #endif 17 - 18 - enum uei_sizes { 19 - UEI_REASON_LEN = 128, 20 - UEI_MSG_LEN = 1024, 21 - UEI_DUMP_DFL_LEN = 32768, 22 - }; 23 - 24 - struct user_exit_info { 25 - int kind; 26 - s64 exit_code; 27 - char reason[UEI_REASON_LEN]; 28 - char msg[UEI_MSG_LEN]; 29 - }; 30 - 31 - #ifdef __bpf__ 32 - 33 - #ifndef LSP 34 - #include "vmlinux.h" 35 - #endif 36 - #include <bpf/bpf_core_read.h> 37 - 38 - #define UEI_DEFINE(__name) \ 39 - char RESIZABLE_ARRAY(data, __name##_dump); \ 40 - const volatile u32 __name##_dump_len; \ 41 - struct user_exit_info __name SEC(".data") 42 - 43 - #define UEI_RECORD(__uei_name, __ei) ({ \ 44 - bpf_probe_read_kernel_str(__uei_name.reason, \ 45 - sizeof(__uei_name.reason), (__ei)->reason); \ 46 - bpf_probe_read_kernel_str(__uei_name.msg, \ 47 - sizeof(__uei_name.msg), (__ei)->msg); \ 48 - bpf_probe_read_kernel_str(__uei_name##_dump, \ 49 - __uei_name##_dump_len, (__ei)->dump); \ 50 - if (bpf_core_field_exists((__ei)->exit_code)) \ 51 - __uei_name.exit_code = (__ei)->exit_code; \ 52 - /* use __sync to force memory barrier */ \ 53 - __sync_val_compare_and_swap(&__uei_name.kind, __uei_name.kind, \ 54 - (__ei)->kind); \ 55 - }) 56 - 57 - #else /* !__bpf__ */ 58 - 59 13 #include <stdio.h> 60 14 #include <stdbool.h> 15 + 16 + #include "user_exit_info_common.h" 61 17 62 18 /* no need to call the following explicitly if SCX_OPS_LOAD() is used */ 63 19 #define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({ \ ··· 70 114 71 115 #define UEI_ECODE_RESTART(__ecode) (UEI_ECODE_SYS_ACT((__ecode)) == SCX_ECODE_ACT_RESTART) 72 116 73 - #endif /* __bpf__ */ 74 117 #endif /* __USER_EXIT_INFO_H */
+30
tools/sched_ext/include/scx/user_exit_info_common.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Define struct user_exit_info which is shared between BPF and userspace parts 4 + * to communicate exit status and other information. 5 + * 6 + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 7 + * Copyright (c) 2022 Tejun Heo <tj@kernel.org> 8 + * Copyright (c) 2022 David Vernet <dvernet@meta.com> 9 + */ 10 + #ifndef __USER_EXIT_INFO_COMMON_H 11 + #define __USER_EXIT_INFO_COMMON_H 12 + 13 + #ifdef LSP 14 + #include "../vmlinux.h" 15 + #endif 16 + 17 + enum uei_sizes { 18 + UEI_REASON_LEN = 128, 19 + UEI_MSG_LEN = 1024, 20 + UEI_DUMP_DFL_LEN = 32768, 21 + }; 22 + 23 + struct user_exit_info { 24 + int kind; 25 + s64 exit_code; 26 + char reason[UEI_REASON_LEN]; 27 + char msg[UEI_MSG_LEN]; 28 + }; 29 + 30 + #endif /* __USER_EXIT_INFO_COMMON_H */
+1 -1
tools/sched_ext/scx_central.bpf.c
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 - * A central FIFO sched_ext scheduler which demonstrates the followings: 3 + * A central FIFO sched_ext scheduler which demonstrates the following: 4 4 * 5 5 * a. Making all scheduling decisions from one CPU: 6 6 *
+1
tools/sched_ext/scx_central.c
··· 61 61 skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); 62 62 skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); 63 63 64 + assert(skel->rodata->nr_cpu_ids > 0); 64 65 assert(skel->rodata->nr_cpu_ids <= INT32_MAX); 65 66 66 67 while ((opt = getopt(argc, argv, "s:c:pvh")) != -1) {
+1 -1
tools/sched_ext/scx_flatcg.bpf.c
··· 950 950 .cgroup_move = (void *)fcg_cgroup_move, 951 951 .init = (void *)fcg_init, 952 952 .exit = (void *)fcg_exit, 953 - .flags = SCX_OPS_ENQ_EXITING, 953 + .flags = SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING, 954 954 .name = "flatcg");
+2
tools/sched_ext/scx_flatcg.c
··· 6 6 */ 7 7 #include <stdio.h> 8 8 #include <signal.h> 9 + #include <assert.h> 9 10 #include <unistd.h> 10 11 #include <libgen.h> 11 12 #include <limits.h> ··· 138 137 skel = SCX_OPS_OPEN(flatcg_ops, scx_flatcg); 139 138 140 139 skel->rodata->nr_cpus = libbpf_num_possible_cpus(); 140 + assert(skel->rodata->nr_cpus > 0); 141 141 skel->rodata->cgrp_slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); 142 142 143 143 while ((opt = getopt(argc, argv, "s:i:dfvh")) != -1) {
+2
tools/sched_ext/scx_simple.c
··· 7 7 #include <stdio.h> 8 8 #include <unistd.h> 9 9 #include <signal.h> 10 + #include <assert.h> 10 11 #include <libgen.h> 11 12 #include <bpf/bpf.h> 12 13 #include <scx/common.h> ··· 42 41 static void read_stats(struct scx_simple *skel, __u64 *stats) 43 42 { 44 43 int nr_cpus = libbpf_num_possible_cpus(); 44 + assert(nr_cpus > 0); 45 45 __u64 cnts[2][nr_cpus]; 46 46 __u32 idx; 47 47