Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched_ext: Add selftests

Add basic selftests.

Signed-off-by: David Vernet <dvernet@meta.com>
Acked-by: Tejun Heo <tj@kernel.org>

Authored by David Vernet; committed by Tejun Heo.
a5db7817 fa48e8d2

+3244
+6
tools/testing/selftests/sched_ext/.gitignore
# Ignore all build products in this directory; keep only the checked-in
# sources, the Makefile, this file, and the kconfig fragment.
*
!*.c
!*.h
!Makefile
!.gitignore
!config
+218
tools/testing/selftests/sched_ext/Makefile
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
include ../../../build/Build.include
include ../../../scripts/Makefile.arch
include ../../../scripts/Makefile.include
include ../lib.mk

ifneq ($(LLVM),)
ifneq ($(filter %/,$(LLVM)),)
LLVM_PREFIX := $(LLVM)
else ifneq ($(filter -%,$(LLVM)),)
LLVM_SUFFIX := $(LLVM)
endif

CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
else
CC := gcc
endif # LLVM

ifneq ($(CROSS_COMPILE),)
$(error CROSS_COMPILE not supported for scx selftests)
endif # CROSS_COMPILE

CURDIR := $(abspath .)
REPOROOT := $(abspath ../../../..)
TOOLSDIR := $(REPOROOT)/tools
LIBDIR := $(TOOLSDIR)/lib
BPFDIR := $(LIBDIR)/bpf
TOOLSINCDIR := $(TOOLSDIR)/include
BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool
APIDIR := $(TOOLSINCDIR)/uapi
GENDIR := $(REPOROOT)/include/generated
GENHDR := $(GENDIR)/autoconf.h
SCXTOOLSDIR := $(TOOLSDIR)/sched_ext
SCXTOOLSINCDIR := $(TOOLSDIR)/sched_ext/include

OUTPUT_DIR := $(CURDIR)/build
OBJ_DIR := $(OUTPUT_DIR)/obj
INCLUDE_DIR := $(OUTPUT_DIR)/include
BPFOBJ_DIR := $(OBJ_DIR)/libbpf
SCXOBJ_DIR := $(OBJ_DIR)/sched_ext
BPFOBJ := $(BPFOBJ_DIR)/libbpf.a
LIBBPF_OUTPUT := $(OBJ_DIR)/libbpf/libbpf.a
DEFAULT_BPFTOOL := $(OUTPUT_DIR)/sbin/bpftool
HOST_BUILD_DIR := $(OBJ_DIR)
HOST_OUTPUT_DIR := $(OUTPUT_DIR)

VMLINUX_BTF_PATHS ?= ../../../../vmlinux \
		     /sys/kernel/btf/vmlinux \
		     /boot/vmlinux-$(shell uname -r)
VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
ifeq ($(VMLINUX_BTF),)
$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
endif

BPFTOOL ?= $(DEFAULT_BPFTOOL)

ifneq ($(wildcard $(GENHDR)),)
GENFLAGS := -DHAVE_GENHDR
endif

CFLAGS += -g -O2 -rdynamic -pthread -Wall -Werror $(GENFLAGS) \
	  -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
	  -I$(TOOLSINCDIR) -I$(APIDIR) -I$(CURDIR)/include -I$(SCXTOOLSINCDIR)

# Silence some warnings when compiled with clang
ifneq ($(LLVM),)
CFLAGS += -Wno-unused-command-line-argument
endif

LDFLAGS = -lelf -lz -lpthread -lzstd

IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
			grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')

# Get Clang's default includes on this system, as opposed to those seen by
# '-target bpf'. This fixes "missing" files on some architectures/distros,
# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
#
# Use '-idirafter': Don't interfere with include mechanics except where the
# build would have failed anyways.
define get_sys_includes
$(shell $(1) -v -E - </dev/null 2>&1 \
	| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
$(shell $(1) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
endef

BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \
	     $(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) \
	     -I$(CURDIR)/include -I$(CURDIR)/include/bpf-compat \
	     -I$(INCLUDE_DIR) -I$(APIDIR) -I$(SCXTOOLSINCDIR) \
	     -I$(REPOROOT)/include \
	     $(call get_sys_includes,$(CLANG)) \
	     -Wall -Wno-compare-distinct-pointer-types \
	     -Wno-incompatible-function-pointer-types \
	     -O2 -mcpu=v3

# sort removes libbpf duplicates when not cross-building
MAKE_DIRS := $(sort $(OBJ_DIR)/libbpf $(OBJ_DIR)/libbpf \
	     $(OBJ_DIR)/bpftool $(OBJ_DIR)/resolve_btfids \
	     $(INCLUDE_DIR) $(SCXOBJ_DIR))

$(MAKE_DIRS):
	$(call msg,MKDIR,,$@)
	$(Q)mkdir -p $@

$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
	   $(APIDIR)/linux/bpf.h \
	   | $(OBJ_DIR)/libbpf
	$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \
		    EXTRA_CFLAGS='-g -O0 -fPIC' \
		    DESTDIR=$(OUTPUT_DIR) prefix= all install_headers

$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
		    $(LIBBPF_OUTPUT) | $(OBJ_DIR)/bpftool
	$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
		    ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \
		    EXTRA_CFLAGS='-g -O0' \
		    OUTPUT=$(OBJ_DIR)/bpftool/ \
		    LIBBPF_OUTPUT=$(OBJ_DIR)/libbpf/ \
		    LIBBPF_DESTDIR=$(OUTPUT_DIR)/ \
		    prefix= DESTDIR=$(OUTPUT_DIR)/ install-bin

$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
ifeq ($(VMLINUX_H),)
	$(call msg,GEN,,$@)
	$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
else
	$(call msg,CP,,$@)
	$(Q)cp "$(VMLINUX_H)" $@
endif

$(SCXOBJ_DIR)/%.bpf.o: %.bpf.c $(INCLUDE_DIR)/vmlinux.h | $(BPFOBJ) $(SCXOBJ_DIR)
	$(call msg,CLNG-BPF,,$(notdir $@))
	$(Q)$(CLANG) $(BPF_CFLAGS) -target bpf -c $< -o $@

$(INCLUDE_DIR)/%.bpf.skel.h: $(SCXOBJ_DIR)/%.bpf.o $(INCLUDE_DIR)/vmlinux.h $(BPFTOOL) | $(INCLUDE_DIR)
	$(eval sched=$(notdir $@))
	$(call msg,GEN-SKEL,,$(sched))
	$(Q)$(BPFTOOL) gen object $(<:.o=.linked1.o) $<
	$(Q)$(BPFTOOL) gen object $(<:.o=.linked2.o) $(<:.o=.linked1.o)
	$(Q)$(BPFTOOL) gen object $(<:.o=.linked3.o) $(<:.o=.linked2.o)
	$(Q)diff $(<:.o=.linked2.o) $(<:.o=.linked3.o)
	$(Q)$(BPFTOOL) gen skeleton $(<:.o=.linked3.o) name $(subst .bpf.skel.h,,$(sched)) > $@
	$(Q)$(BPFTOOL) gen subskeleton $(<:.o=.linked3.o) name $(subst .bpf.skel.h,,$(sched)) > $(@:.skel.h=.subskel.h)

################
# C schedulers #
################

override define CLEAN
	rm -rf $(OUTPUT_DIR)
	rm -f *.o *.bpf.o *.bpf.skel.h *.bpf.subskel.h
	rm -f $(TEST_GEN_PROGS)
	rm -f runner
endef

# Every testcase takes all of the BPF progs as dependencies by default. This
# allows testcases to load any BPF scheduler, which is useful for testcases
# that don't need their own prog to run their test.
all_test_bpfprogs := $(foreach prog,$(wildcard *.bpf.c),$(INCLUDE_DIR)/$(patsubst %.c,%.skel.h,$(prog)))

auto-test-targets := \
	create_dsq \
	enq_last_no_enq_fails \
	enq_select_cpu_fails \
	ddsp_bogus_dsq_fail \
	ddsp_vtimelocal_fail \
	dsp_local_on \
	exit \
	hotplug \
	init_enable_count \
	maximal \
	maybe_null \
	minimal \
	prog_run \
	reload_loop \
	select_cpu_dfl \
	select_cpu_dfl_nodispatch \
	select_cpu_dispatch \
	select_cpu_dispatch_bad_dsq \
	select_cpu_dispatch_dbl_dsp \
	select_cpu_vtime \
	test_example \

testcase-targets := $(addsuffix .o,$(addprefix $(SCXOBJ_DIR)/,$(auto-test-targets)))

$(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR)
	$(CC) $(CFLAGS) -c $< -o $@

# Create all of the test targets object files, whose testcase objects will be
# registered into the runner in ELF constructors.
#
# Note that we must do double expansion here in order to support conditionally
# compiling BPF object files only if one is present, as the wildcard Make
# function doesn't support using implicit rules otherwise.
$(testcase-targets): $(SCXOBJ_DIR)/%.o: %.c $(SCXOBJ_DIR)/runner.o $(all_test_bpfprogs) | $(SCXOBJ_DIR)
	$(eval test=$(patsubst %.o,%.c,$(notdir $@)))
	$(CC) $(CFLAGS) -c $< -o $@ $(SCXOBJ_DIR)/runner.o

$(SCXOBJ_DIR)/util.o: util.c | $(SCXOBJ_DIR)
	$(CC) $(CFLAGS) -c $< -o $@

runner: $(SCXOBJ_DIR)/runner.o $(SCXOBJ_DIR)/util.o $(BPFOBJ) $(testcase-targets)
	$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)

TEST_GEN_PROGS := runner

all: runner

.PHONY: all clean help

.DEFAULT_GOAL := all

.DELETE_ON_ERROR:

.SECONDARY:
+9
tools/testing/selftests/sched_ext/config
# Kconfig options required to build a kernel that can run the sched_ext
# selftests (sched_ext scheduler class, cgroup scheduling, BPF, and BTF
# debug info for vmlinux.h / CO-RE).
CONFIG_SCHED_DEBUG=y
CONFIG_SCHED_CLASS_EXT=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_EXT_GROUP_SCHED=y
CONFIG_BPF=y
CONFIG_BPF_SYSCALL=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_BTF=y
+58
tools/testing/selftests/sched_ext/create_dsq.bpf.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Create and destroy DSQs in a loop.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

/* Tear down the per-task DSQ created in create_dsq_init_task(). */
void BPF_STRUCT_OPS(create_dsq_exit_task, struct task_struct *p,
		    struct scx_exit_task_args *args)
{
	scx_bpf_destroy_dsq(p->pid);
}

/* Create one DSQ per task, keyed by PID; error out the scheduler on failure. */
s32 BPF_STRUCT_OPS_SLEEPABLE(create_dsq_init_task, struct task_struct *p,
			     struct scx_init_task_args *args)
{
	s32 err;

	err = scx_bpf_create_dsq(p->pid, -1);
	if (err)
		scx_bpf_error("Failed to create DSQ for %s[%d]",
			      p->comm, p->pid);

	return err;
}

/* Stress create/destroy by cycling 1024 DSQs at scheduler init time. */
s32 BPF_STRUCT_OPS_SLEEPABLE(create_dsq_init)
{
	u32 i;
	s32 err;

	bpf_for(i, 0, 1024) {
		err = scx_bpf_create_dsq(i, -1);
		if (err) {
			scx_bpf_error("Failed to create DSQ %d", i);
			return 0;
		}
	}

	bpf_for(i, 0, 1024) {
		scx_bpf_destroy_dsq(i);
	}

	return 0;
}

SEC(".struct_ops.link")
struct sched_ext_ops create_dsq_ops = {
	.init_task		= create_dsq_init_task,
	.exit_task		= create_dsq_exit_task,
	.init			= create_dsq_init,
	.name			= "create_dsq",
};
+57
tools/testing/selftests/sched_ext/create_dsq.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Userspace driver for the create_dsq BPF scheduler: load it, attach it
 * briefly, and verify nothing errors out.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include <unistd.h>
#include "create_dsq.bpf.skel.h"
#include "scx_test.h"

/* Open and load the BPF skeleton; hand it to run()/cleanup() via *ctx. */
static enum scx_test_status setup(void **ctx)
{
	struct create_dsq *skel;

	skel = create_dsq__open_and_load();
	if (!skel) {
		SCX_ERR("Failed to open and load skel");
		return SCX_TEST_FAIL;
	}
	*ctx = skel;

	return SCX_TEST_PASS;
}

/* Attach the struct_ops scheduler and immediately detach it. */
static enum scx_test_status run(void *ctx)
{
	struct create_dsq *skel = ctx;
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(skel->maps.create_dsq_ops);
	if (!link) {
		SCX_ERR("Failed to attach scheduler");
		return SCX_TEST_FAIL;
	}

	bpf_link__destroy(link);

	return SCX_TEST_PASS;
}

static void cleanup(void *ctx)
{
	struct create_dsq *skel = ctx;

	create_dsq__destroy(skel);
}

struct scx_test create_dsq = {
	.name = "create_dsq",
	.description = "Create and destroy a dsq in a loop",
	.setup = setup,
	.run = run,
	.cleanup = cleanup,
};
REGISTER_SCX_TEST(&create_dsq)
+42
tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler that deliberately vtime-dispatches to a bogus DSQ id from
 * ops.select_cpu(); the kernel is expected to fall back gracefully and
 * eventually exit the scheduler with an error, recorded via UEI.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

UEI_DEFINE(uei);

s32 BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);

	if (cpu >= 0) {
		/*
		 * If we dispatch to a bogus DSQ that will fall back to the
		 * builtin global DSQ, we fail gracefully.
		 */
		scx_bpf_dispatch_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
				       p->scx.dsq_vtime, 0);
		return cpu;
	}

	return prev_cpu;
}

/* Record the exit info so userspace can assert on the exit kind. */
void BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SEC(".struct_ops.link")
struct sched_ext_ops ddsp_bogus_dsq_fail_ops = {
	.select_cpu		= ddsp_bogus_dsq_fail_select_cpu,
	.exit			= ddsp_bogus_dsq_fail_exit,
	.name			= "ddsp_bogus_dsq_fail",
	.timeout_ms		= 1000U,
};
+57
tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Userspace driver: attach the ddsp_bogus_dsq_fail scheduler and verify it
 * exits with SCX_EXIT_ERROR after dispatching to an invalid DSQ.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 */
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ddsp_bogus_dsq_fail.bpf.skel.h"
#include "scx_test.h"

static enum scx_test_status setup(void **ctx)
{
	struct ddsp_bogus_dsq_fail *skel;

	skel = ddsp_bogus_dsq_fail__open_and_load();
	SCX_FAIL_IF(!skel, "Failed to open and load skel");
	*ctx = skel;

	return SCX_TEST_PASS;
}

static enum scx_test_status run(void *ctx)
{
	struct ddsp_bogus_dsq_fail *skel = ctx;
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(skel->maps.ddsp_bogus_dsq_fail_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	/* Give the scheduler time to hit the bogus dispatch and error out. */
	sleep(1);

	SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
	bpf_link__destroy(link);

	return SCX_TEST_PASS;
}

static void cleanup(void *ctx)
{
	struct ddsp_bogus_dsq_fail *skel = ctx;

	ddsp_bogus_dsq_fail__destroy(skel);
}

struct scx_test ddsp_bogus_dsq_fail = {
	.name = "ddsp_bogus_dsq_fail",
	.description = "Verify we gracefully fail, and fall back to using a "
		       "built-in DSQ, if we do a direct dispatch to an invalid"
		       " DSQ in ops.select_cpu()",
	.setup = setup,
	.run = run,
	.cleanup = cleanup,
};
REGISTER_SCX_TEST(&ddsp_bogus_dsq_fail)
+39
tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler that attempts an (illegal) vtime dispatch to the builtin
 * SCX_DSQ_LOCAL from ops.select_cpu(); the kernel should reject it and
 * exit the scheduler with an error, recorded via UEI.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

UEI_DEFINE(uei);

s32 BPF_STRUCT_OPS(ddsp_vtimelocal_fail_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);

	if (cpu >= 0) {
		/* Shouldn't be allowed to vtime dispatch to a builtin DSQ. */
		scx_bpf_dispatch_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
				       p->scx.dsq_vtime, 0);
		return cpu;
	}

	return prev_cpu;
}

/* Record the exit info so userspace can assert on the exit kind. */
void BPF_STRUCT_OPS(ddsp_vtimelocal_fail_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SEC(".struct_ops.link")
struct sched_ext_ops ddsp_vtimelocal_fail_ops = {
	.select_cpu		= ddsp_vtimelocal_fail_select_cpu,
	.exit			= ddsp_vtimelocal_fail_exit,
	.name			= "ddsp_vtimelocal_fail",
	.timeout_ms		= 1000U,
};
+56
tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Userspace driver: attach the ddsp_vtimelocal_fail scheduler and verify it
 * exits with SCX_EXIT_ERROR after vtime-dispatching to a builtin DSQ.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 */
#include <bpf/bpf.h>
#include <scx/common.h>
#include <unistd.h>
#include "ddsp_vtimelocal_fail.bpf.skel.h"
#include "scx_test.h"

static enum scx_test_status setup(void **ctx)
{
	struct ddsp_vtimelocal_fail *skel;

	skel = ddsp_vtimelocal_fail__open_and_load();
	SCX_FAIL_IF(!skel, "Failed to open and load skel");
	*ctx = skel;

	return SCX_TEST_PASS;
}

static enum scx_test_status run(void *ctx)
{
	struct ddsp_vtimelocal_fail *skel = ctx;
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(skel->maps.ddsp_vtimelocal_fail_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	/* Give the scheduler time to hit the illegal dispatch and error out. */
	sleep(1);

	SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
	bpf_link__destroy(link);

	return SCX_TEST_PASS;
}

static void cleanup(void *ctx)
{
	struct ddsp_vtimelocal_fail *skel = ctx;

	ddsp_vtimelocal_fail__destroy(skel);
}

struct scx_test ddsp_vtimelocal_fail = {
	.name = "ddsp_vtimelocal_fail",
	.description = "Verify we gracefully fail, and fall back to using a "
		       "built-in DSQ, if we do a direct vtime dispatch to a "
		       "built-in DSQ from DSQ in ops.select_cpu()",
	.setup = setup,
	.run = run,
	.cleanup = cleanup,
};
REGISTER_SCX_TEST(&ddsp_vtimelocal_fail)
+65
tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler that queues wakeups in a BPF queue map and, from ops.dispatch(),
 * sends each queued task to a randomly chosen CPU's local DSQ via
 * SCX_DSQ_LOCAL_ON.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";
/* Set by userspace before load; assumed > 0 (see dsp_local_on.c). */
const volatile s32 nr_cpus;

UEI_DEFINE(uei);

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 8192);
	__type(value, s32);
} queue SEC(".maps");

s32 BPF_STRUCT_OPS(dsp_local_on_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	return prev_cpu;
}

/* Stash the waking task's PID; it is dequeued in ops.dispatch(). */
void BPF_STRUCT_OPS(dsp_local_on_enqueue, struct task_struct *p,
		    u64 enq_flags)
{
	s32 pid = p->pid;

	if (bpf_map_push_elem(&queue, &pid, 0))
		scx_bpf_error("Failed to enqueue %s[%d]", p->comm, p->pid);
}

void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
{
	s32 pid, target;
	struct task_struct *p;

	if (bpf_map_pop_elem(&queue, &pid))
		return;

	/* Task may have exited since being queued. */
	p = bpf_task_from_pid(pid);
	if (!p)
		return;

	/* NOTE(review): divides by nr_cpus — relies on userspace setting it
	 * to a positive value before load. */
	target = bpf_get_prandom_u32() % nr_cpus;

	scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
	bpf_task_release(p);
}

void BPF_STRUCT_OPS(dsp_local_on_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SEC(".struct_ops.link")
struct sched_ext_ops dsp_local_on_ops = {
	.select_cpu		= dsp_local_on_select_cpu,
	.enqueue		= dsp_local_on_enqueue,
	.dispatch		= dsp_local_on_dispatch,
	.exit			= dsp_local_on_exit,
	.name			= "dsp_local_on",
	.timeout_ms		= 1000U,
};
+58
tools/testing/selftests/sched_ext/dsp_local_on.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + */ 6 + #include <bpf/bpf.h> 7 + #include <scx/common.h> 8 + #include <unistd.h> 9 + #include "dsp_local_on.bpf.skel.h" 10 + #include "scx_test.h" 11 + 12 + static enum scx_test_status setup(void **ctx) 13 + { 14 + struct dsp_local_on *skel; 15 + 16 + skel = dsp_local_on__open(); 17 + SCX_FAIL_IF(!skel, "Failed to open"); 18 + 19 + skel->rodata->nr_cpus = libbpf_num_possible_cpus(); 20 + SCX_FAIL_IF(dsp_local_on__load(skel), "Failed to load skel"); 21 + *ctx = skel; 22 + 23 + return SCX_TEST_PASS; 24 + } 25 + 26 + static enum scx_test_status run(void *ctx) 27 + { 28 + struct dsp_local_on *skel = ctx; 29 + struct bpf_link *link; 30 + 31 + link = bpf_map__attach_struct_ops(skel->maps.dsp_local_on_ops); 32 + SCX_FAIL_IF(!link, "Failed to attach struct_ops"); 33 + 34 + /* Just sleeping is fine, plenty of scheduling events happening */ 35 + sleep(1); 36 + 37 + SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR)); 38 + bpf_link__destroy(link); 39 + 40 + return SCX_TEST_PASS; 41 + } 42 + 43 + static void cleanup(void *ctx) 44 + { 45 + struct dsp_local_on *skel = ctx; 46 + 47 + dsp_local_on__destroy(skel); 48 + } 49 + 50 + struct scx_test dsp_local_on = { 51 + .name = "dsp_local_on", 52 + .description = "Verify we can directly dispatch tasks to a local DSQs " 53 + "from osp.dispatch()", 54 + .setup = setup, 55 + .run = run, 56 + .cleanup = cleanup, 57 + }; 58 + REGISTER_SCX_TEST(&dsp_local_on)
+21
tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A scheduler that validates the behavior of direct dispatching with a default
 * select_cpu implementation.
 *
 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2023 David Vernet <dvernet@meta.com>
 * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
 */

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

/*
 * Deliberately invalid ops: SCX_OPS_ENQ_LAST is set but no ops.enqueue() is
 * defined, so the kernel is expected to refuse to load/attach this scheduler.
 */
SEC(".struct_ops.link")
struct sched_ext_ops enq_last_no_enq_fails_ops = {
	.name			= "enq_last_no_enq_fails",
	/* Need to define ops.enqueue() with SCX_OPS_ENQ_LAST */
	.flags			= SCX_OPS_ENQ_LAST,
	.timeout_ms		= 1000U,
};
+60
tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <bpf/bpf.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "enq_last_no_enq_fails.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + static enum scx_test_status setup(void **ctx) 15 + { 16 + struct enq_last_no_enq_fails *skel; 17 + 18 + skel = enq_last_no_enq_fails__open_and_load(); 19 + if (!skel) { 20 + SCX_ERR("Failed to open and load skel"); 21 + return SCX_TEST_FAIL; 22 + } 23 + *ctx = skel; 24 + 25 + return SCX_TEST_PASS; 26 + } 27 + 28 + static enum scx_test_status run(void *ctx) 29 + { 30 + struct enq_last_no_enq_fails *skel = ctx; 31 + struct bpf_link *link; 32 + 33 + link = bpf_map__attach_struct_ops(skel->maps.enq_last_no_enq_fails_ops); 34 + if (link) { 35 + SCX_ERR("Incorrectly succeeded in to attaching scheduler"); 36 + return SCX_TEST_FAIL; 37 + } 38 + 39 + bpf_link__destroy(link); 40 + 41 + return SCX_TEST_PASS; 42 + } 43 + 44 + static void cleanup(void *ctx) 45 + { 46 + struct enq_last_no_enq_fails *skel = ctx; 47 + 48 + enq_last_no_enq_fails__destroy(skel); 49 + } 50 + 51 + struct scx_test enq_last_no_enq_fails = { 52 + .name = "enq_last_no_enq_fails", 53 + .description = "Verify we fail to load a scheduler if we specify " 54 + "the SCX_OPS_ENQ_LAST flag without defining " 55 + "ops.enqueue()", 56 + .setup = setup, 57 + .run = run, 58 + .cleanup = cleanup, 59 + }; 60 + REGISTER_SCX_TEST(&enq_last_no_enq_fails)
+43
tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler that (illegally) calls scx_bpf_select_cpu_dfl() from
 * ops.enqueue(); the kfunc is only valid in ops.select_cpu().
 *
 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2023 David Vernet <dvernet@meta.com>
 * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
 */

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

/* Manually specify the signature until the kfunc is added to the scx repo. */
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
			   bool *found) __ksym;

s32 BPF_STRUCT_OPS(enq_select_cpu_fails_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	return prev_cpu;
}

void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
		    u64 enq_flags)
{
	/*
	 * Need to initialize the variable or the verifier will fail to load.
	 * Improving these semantics is actively being worked on.
	 */
	bool found = false;

	/* Can only call from ops.select_cpu() */
	scx_bpf_select_cpu_dfl(p, 0, 0, &found);

	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}

SEC(".struct_ops.link")
struct sched_ext_ops enq_select_cpu_fails_ops = {
	.select_cpu		= enq_select_cpu_fails_select_cpu,
	.enqueue		= enq_select_cpu_fails_enqueue,
	.name			= "enq_select_cpu_fails",
	.timeout_ms		= 1000U,
};
+61
tools/testing/selftests/sched_ext/enq_select_cpu_fails.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Userspace driver: attach the enq_select_cpu_fails scheduler and let it run
 * briefly; the invalid kfunc call is exercised on the BPF side.
 *
 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2023 David Vernet <dvernet@meta.com>
 * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
 */
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include <unistd.h>
#include "enq_select_cpu_fails.bpf.skel.h"
#include "scx_test.h"

static enum scx_test_status setup(void **ctx)
{
	struct enq_select_cpu_fails *skel;

	skel = enq_select_cpu_fails__open_and_load();
	if (!skel) {
		SCX_ERR("Failed to open and load skel");
		return SCX_TEST_FAIL;
	}
	*ctx = skel;

	return SCX_TEST_PASS;
}

static enum scx_test_status run(void *ctx)
{
	struct enq_select_cpu_fails *skel = ctx;
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(skel->maps.enq_select_cpu_fails_ops);
	if (!link) {
		SCX_ERR("Failed to attach scheduler");
		return SCX_TEST_FAIL;
	}

	/* Let scheduling events drive ops.enqueue() for a while. */
	sleep(1);

	bpf_link__destroy(link);

	return SCX_TEST_PASS;
}

static void cleanup(void *ctx)
{
	struct enq_select_cpu_fails *skel = ctx;

	enq_select_cpu_fails__destroy(skel);
}

struct scx_test enq_select_cpu_fails = {
	.name = "enq_select_cpu_fails",
	.description = "Verify we fail to call scx_bpf_select_cpu_dfl() "
		       "from ops.enqueue()",
	.setup = setup,
	.run = run,
	.cleanup = cleanup,
};
REGISTER_SCX_TEST(&enq_select_cpu_fails)
+84
tools/testing/selftests/sched_ext/exit.bpf.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler that cleanly exits via scx_bpf_exit() from whichever callback is
 * selected by the userspace-configured exit_point (see exit_test.h).
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

#include "exit_test.h"

/* Which enum exit_test_case to exit from; set by userspace before load. */
const volatile int exit_point;
UEI_DEFINE(uei);

/* Exit with the test case as both the exit code and the message. */
#define EXIT_CLEANLY() scx_bpf_exit(exit_point, "%d", exit_point)

s32 BPF_STRUCT_OPS(exit_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	bool found;

	if (exit_point == EXIT_SELECT_CPU)
		EXIT_CLEANLY();

	return scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
}

void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
{
	if (exit_point == EXIT_ENQUEUE)
		EXIT_CLEANLY();

	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}

void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
{
	if (exit_point == EXIT_DISPATCH)
		EXIT_CLEANLY();

	scx_bpf_consume(SCX_DSQ_GLOBAL);
}

void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
{
	if (exit_point == EXIT_ENABLE)
		EXIT_CLEANLY();
}

s32 BPF_STRUCT_OPS(exit_init_task, struct task_struct *p,
		   struct scx_init_task_args *args)
{
	if (exit_point == EXIT_INIT_TASK)
		EXIT_CLEANLY();

	return 0;
}

/* Record exit info so userspace can verify kind, code and message. */
void BPF_STRUCT_OPS(exit_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

s32 BPF_STRUCT_OPS_SLEEPABLE(exit_init)
{
	if (exit_point == EXIT_INIT)
		EXIT_CLEANLY();

	return 0;
}

SEC(".struct_ops.link")
struct sched_ext_ops exit_ops = {
	.select_cpu		= exit_select_cpu,
	.enqueue		= exit_enqueue,
	.dispatch		= exit_dispatch,
	.init_task		= exit_init_task,
	.enable			= exit_enable,
	.exit			= exit_exit,
	.init			= exit_init,
	.name			= "exit",
	.timeout_ms		= 1000U,
};
+55
tools/testing/selftests/sched_ext/exit.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + */ 6 + #include <bpf/bpf.h> 7 + #include <sched.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "exit.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + #include "exit_test.h" 15 + 16 + static enum scx_test_status run(void *ctx) 17 + { 18 + enum exit_test_case tc; 19 + 20 + for (tc = 0; tc < NUM_EXITS; tc++) { 21 + struct exit *skel; 22 + struct bpf_link *link; 23 + char buf[16]; 24 + 25 + skel = exit__open(); 26 + skel->rodata->exit_point = tc; 27 + exit__load(skel); 28 + link = bpf_map__attach_struct_ops(skel->maps.exit_ops); 29 + if (!link) { 30 + SCX_ERR("Failed to attach scheduler"); 31 + exit__destroy(skel); 32 + return SCX_TEST_FAIL; 33 + } 34 + 35 + /* Assumes uei.kind is written last */ 36 + while (skel->data->uei.kind == EXIT_KIND(SCX_EXIT_NONE)) 37 + sched_yield(); 38 + 39 + SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_UNREG_BPF)); 40 + SCX_EQ(skel->data->uei.exit_code, tc); 41 + sprintf(buf, "%d", tc); 42 + SCX_ASSERT(!strcmp(skel->data->uei.msg, buf)); 43 + bpf_link__destroy(link); 44 + exit__destroy(skel); 45 + } 46 + 47 + return SCX_TEST_PASS; 48 + } 49 + 50 + struct scx_test exit_test = { 51 + .name = "exit", 52 + .description = "Verify we can cleanly exit a scheduler in multiple places", 53 + .run = run, 54 + }; 55 + REGISTER_SCX_TEST(&exit_test)
+20
tools/testing/selftests/sched_ext/exit_test.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Exit points shared between exit.bpf.c and exit.c: each case names the
 * sched_ext callback from which the scheduler should cleanly exit.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */

#ifndef __EXIT_TEST_H__
#define __EXIT_TEST_H__

enum exit_test_case {
	EXIT_SELECT_CPU,
	EXIT_ENQUEUE,
	EXIT_DISPATCH,
	EXIT_ENABLE,
	EXIT_INIT_TASK,
	EXIT_INIT,
	NUM_EXITS,	/* number of cases; also the loop bound in exit.c */
};

#endif /* __EXIT_TEST_H__ */
+61
tools/testing/selftests/sched_ext/hotplug.bpf.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Two scheduler variants for the hotplug test: one with CPU hotplug
 * callbacks that exit with a recognizable code, and one without any.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

#include "hotplug_test.h"

UEI_DEFINE(uei);

void BPF_STRUCT_OPS(hotplug_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

/* Exit the scheduler, encoding the hotplug direction in the exit code. */
static void exit_from_hotplug(s32 cpu, bool onlining)
{
	/*
	 * Ignored, just used to verify that we can invoke blocking kfuncs
	 * from the hotplug path.
	 */
	scx_bpf_create_dsq(0, -1);

	s64 code = SCX_ECODE_ACT_RESTART | HOTPLUG_EXIT_RSN;

	if (onlining)
		code |= HOTPLUG_ONLINING;

	scx_bpf_exit(code, "hotplug event detected (%d going %s)", cpu,
		     onlining ? "online" : "offline");
}

void BPF_STRUCT_OPS_SLEEPABLE(hotplug_cpu_online, s32 cpu)
{
	exit_from_hotplug(cpu, true);
}

void BPF_STRUCT_OPS_SLEEPABLE(hotplug_cpu_offline, s32 cpu)
{
	exit_from_hotplug(cpu, false);
}

/* Variant with hotplug callbacks defined. */
SEC(".struct_ops.link")
struct sched_ext_ops hotplug_cb_ops = {
	.cpu_online		= hotplug_cpu_online,
	.cpu_offline		= hotplug_cpu_offline,
	.exit			= hotplug_exit,
	.name			= "hotplug_cbs",
	.timeout_ms		= 1000U,
};

/* Variant without hotplug callbacks; the kernel unregisters it instead. */
SEC(".struct_ops.link")
struct sched_ext_ops hotplug_nocb_ops = {
	.exit			= hotplug_exit,
	.name			= "hotplug_nocbs",
	.timeout_ms		= 1000U,
};
+168
tools/testing/selftests/sched_ext/hotplug.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + */ 6 + #include <bpf/bpf.h> 7 + #include <sched.h> 8 + #include <scx/common.h> 9 + #include <sched.h> 10 + #include <sys/wait.h> 11 + #include <unistd.h> 12 + 13 + #include "hotplug_test.h" 14 + #include "hotplug.bpf.skel.h" 15 + #include "scx_test.h" 16 + #include "util.h" 17 + 18 + const char *online_path = "/sys/devices/system/cpu/cpu1/online"; 19 + 20 + static bool is_cpu_online(void) 21 + { 22 + return file_read_long(online_path) > 0; 23 + } 24 + 25 + static void toggle_online_status(bool online) 26 + { 27 + long val = online ? 1 : 0; 28 + int ret; 29 + 30 + ret = file_write_long(online_path, val); 31 + if (ret != 0) 32 + fprintf(stderr, "Failed to bring CPU %s (%s)", 33 + online ? "online" : "offline", strerror(errno)); 34 + } 35 + 36 + static enum scx_test_status setup(void **ctx) 37 + { 38 + if (!is_cpu_online()) 39 + return SCX_TEST_SKIP; 40 + 41 + return SCX_TEST_PASS; 42 + } 43 + 44 + static enum scx_test_status test_hotplug(bool onlining, bool cbs_defined) 45 + { 46 + struct hotplug *skel; 47 + struct bpf_link *link; 48 + long kind, code; 49 + 50 + SCX_ASSERT(is_cpu_online()); 51 + 52 + skel = hotplug__open_and_load(); 53 + SCX_ASSERT(skel); 54 + 55 + /* Testing the offline -> online path, so go offline before starting */ 56 + if (onlining) 57 + toggle_online_status(0); 58 + 59 + if (cbs_defined) { 60 + kind = SCX_KIND_VAL(SCX_EXIT_UNREG_BPF); 61 + code = SCX_ECODE_VAL(SCX_ECODE_ACT_RESTART) | HOTPLUG_EXIT_RSN; 62 + if (onlining) 63 + code |= HOTPLUG_ONLINING; 64 + } else { 65 + kind = SCX_KIND_VAL(SCX_EXIT_UNREG_KERN); 66 + code = SCX_ECODE_VAL(SCX_ECODE_ACT_RESTART) | 67 + SCX_ECODE_VAL(SCX_ECODE_RSN_HOTPLUG); 68 + } 69 + 70 + if (cbs_defined) 71 + link = bpf_map__attach_struct_ops(skel->maps.hotplug_cb_ops); 72 + else 73 + link = 
bpf_map__attach_struct_ops(skel->maps.hotplug_nocb_ops); 74 + 75 + if (!link) { 76 + SCX_ERR("Failed to attach scheduler"); 77 + hotplug__destroy(skel); 78 + return SCX_TEST_FAIL; 79 + } 80 + 81 + toggle_online_status(onlining ? 1 : 0); 82 + 83 + while (!UEI_EXITED(skel, uei)) 84 + sched_yield(); 85 + 86 + SCX_EQ(skel->data->uei.kind, kind); 87 + SCX_EQ(UEI_REPORT(skel, uei), code); 88 + 89 + if (!onlining) 90 + toggle_online_status(1); 91 + 92 + bpf_link__destroy(link); 93 + hotplug__destroy(skel); 94 + 95 + return SCX_TEST_PASS; 96 + } 97 + 98 + static enum scx_test_status test_hotplug_attach(void) 99 + { 100 + struct hotplug *skel; 101 + struct bpf_link *link; 102 + enum scx_test_status status = SCX_TEST_PASS; 103 + long kind, code; 104 + 105 + SCX_ASSERT(is_cpu_online()); 106 + SCX_ASSERT(scx_hotplug_seq() > 0); 107 + 108 + skel = SCX_OPS_OPEN(hotplug_nocb_ops, hotplug); 109 + SCX_ASSERT(skel); 110 + 111 + SCX_OPS_LOAD(skel, hotplug_nocb_ops, hotplug, uei); 112 + 113 + /* 114 + * Take the CPU offline to increment the global hotplug seq, which 115 + * should cause attach to fail due to us setting the hotplug seq above 116 + */ 117 + toggle_online_status(0); 118 + link = bpf_map__attach_struct_ops(skel->maps.hotplug_nocb_ops); 119 + 120 + toggle_online_status(1); 121 + 122 + SCX_ASSERT(link); 123 + while (!UEI_EXITED(skel, uei)) 124 + sched_yield(); 125 + 126 + kind = SCX_KIND_VAL(SCX_EXIT_UNREG_KERN); 127 + code = SCX_ECODE_VAL(SCX_ECODE_ACT_RESTART) | 128 + SCX_ECODE_VAL(SCX_ECODE_RSN_HOTPLUG); 129 + SCX_EQ(skel->data->uei.kind, kind); 130 + SCX_EQ(UEI_REPORT(skel, uei), code); 131 + 132 + bpf_link__destroy(link); 133 + hotplug__destroy(skel); 134 + 135 + return status; 136 + } 137 + 138 + static enum scx_test_status run(void *ctx) 139 + { 140 + 141 + #define HP_TEST(__onlining, __cbs_defined) ({ \ 142 + if (test_hotplug(__onlining, __cbs_defined) != SCX_TEST_PASS) \ 143 + return SCX_TEST_FAIL; \ 144 + }) 145 + 146 + HP_TEST(true, true); 147 + HP_TEST(false, 
true); 148 + HP_TEST(true, false); 149 + HP_TEST(false, false); 150 + 151 + #undef HP_TEST 152 + 153 + return test_hotplug_attach(); 154 + } 155 + 156 + static void cleanup(void *ctx) 157 + { 158 + toggle_online_status(1); 159 + } 160 + 161 + struct scx_test hotplug_test = { 162 + .name = "hotplug", 163 + .description = "Verify hotplug behavior", 164 + .setup = setup, 165 + .run = run, 166 + .cleanup = cleanup, 167 + }; 168 + REGISTER_SCX_TEST(&hotplug_test)
+15
tools/testing/selftests/sched_ext/hotplug_test.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + */ 6 + 7 + #ifndef __HOTPLUG_TEST_H__ 8 + #define __HOTPLUG_TEST_H__ 9 + 10 + enum hotplug_test_flags { 11 + HOTPLUG_EXIT_RSN = 1LLU << 0, 12 + HOTPLUG_ONLINING = 1LLU << 1, 13 + }; 14 + 15 + #endif // # __HOTPLUG_TEST_H__
+53
tools/testing/selftests/sched_ext/init_enable_count.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A scheduler that verifies that we do proper counting of init, enable, etc 4 + * callbacks. 5 + * 6 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 7 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 8 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 9 + */ 10 + 11 + #include <scx/common.bpf.h> 12 + 13 + char _license[] SEC("license") = "GPL"; 14 + 15 + u64 init_task_cnt, exit_task_cnt, enable_cnt, disable_cnt; 16 + u64 init_fork_cnt, init_transition_cnt; 17 + 18 + s32 BPF_STRUCT_OPS_SLEEPABLE(cnt_init_task, struct task_struct *p, 19 + struct scx_init_task_args *args) 20 + { 21 + __sync_fetch_and_add(&init_task_cnt, 1); 22 + 23 + if (args->fork) 24 + __sync_fetch_and_add(&init_fork_cnt, 1); 25 + else 26 + __sync_fetch_and_add(&init_transition_cnt, 1); 27 + 28 + return 0; 29 + } 30 + 31 + void BPF_STRUCT_OPS(cnt_exit_task, struct task_struct *p) 32 + { 33 + __sync_fetch_and_add(&exit_task_cnt, 1); 34 + } 35 + 36 + void BPF_STRUCT_OPS(cnt_enable, struct task_struct *p) 37 + { 38 + __sync_fetch_and_add(&enable_cnt, 1); 39 + } 40 + 41 + void BPF_STRUCT_OPS(cnt_disable, struct task_struct *p) 42 + { 43 + __sync_fetch_and_add(&disable_cnt, 1); 44 + } 45 + 46 + SEC(".struct_ops.link") 47 + struct sched_ext_ops init_enable_count_ops = { 48 + .init_task = cnt_init_task, 49 + .exit_task = cnt_exit_task, 50 + .enable = cnt_enable, 51 + .disable = cnt_disable, 52 + .name = "init_enable_count", 53 + };
+166
tools/testing/selftests/sched_ext/init_enable_count.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <stdio.h> 8 + #include <unistd.h> 9 + #include <sched.h> 10 + #include <bpf/bpf.h> 11 + #include <scx/common.h> 12 + #include <sys/wait.h> 13 + #include "scx_test.h" 14 + #include "init_enable_count.bpf.skel.h" 15 + 16 + #define SCHED_EXT 7 17 + 18 + static struct init_enable_count * 19 + open_load_prog(bool global) 20 + { 21 + struct init_enable_count *skel; 22 + 23 + skel = init_enable_count__open(); 24 + SCX_BUG_ON(!skel, "Failed to open skel"); 25 + 26 + if (!global) 27 + skel->struct_ops.init_enable_count_ops->flags |= SCX_OPS_SWITCH_PARTIAL; 28 + 29 + SCX_BUG_ON(init_enable_count__load(skel), "Failed to load skel"); 30 + 31 + return skel; 32 + } 33 + 34 + static enum scx_test_status run_test(bool global) 35 + { 36 + struct init_enable_count *skel; 37 + struct bpf_link *link; 38 + const u32 num_children = 5, num_pre_forks = 1024; 39 + int ret, i, status; 40 + struct sched_param param = {}; 41 + pid_t pids[num_pre_forks]; 42 + 43 + skel = open_load_prog(global); 44 + 45 + /* 46 + * Fork a bunch of children before we attach the scheduler so that we 47 + * ensure (at least in practical terms) that there are more tasks that 48 + * transition from SCHED_OTHER -> SCHED_EXT than there are tasks that 49 + * take the fork() path either below or in other processes. 
50 + */ 51 + for (i = 0; i < num_pre_forks; i++) { 52 + pids[i] = fork(); 53 + SCX_FAIL_IF(pids[i] < 0, "Failed to fork child"); 54 + if (pids[i] == 0) { 55 + sleep(1); 56 + exit(0); 57 + } 58 + } 59 + 60 + link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops); 61 + SCX_FAIL_IF(!link, "Failed to attach struct_ops"); 62 + 63 + for (i = 0; i < num_pre_forks; i++) { 64 + SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i], 65 + "Failed to wait for pre-forked child\n"); 66 + 67 + SCX_FAIL_IF(status != 0, "Pre-forked child %d exited with status %d\n", i, 68 + status); 69 + } 70 + 71 + bpf_link__destroy(link); 72 + SCX_GE(skel->bss->init_task_cnt, num_pre_forks); 73 + SCX_GE(skel->bss->exit_task_cnt, num_pre_forks); 74 + 75 + link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops); 76 + SCX_FAIL_IF(!link, "Failed to attach struct_ops"); 77 + 78 + /* SCHED_EXT children */ 79 + for (i = 0; i < num_children; i++) { 80 + pids[i] = fork(); 81 + SCX_FAIL_IF(pids[i] < 0, "Failed to fork child"); 82 + 83 + if (pids[i] == 0) { 84 + ret = sched_setscheduler(0, SCHED_EXT, &param); 85 + SCX_BUG_ON(ret, "Failed to set sched to sched_ext"); 86 + 87 + /* 88 + * Reset to SCHED_OTHER for half of them. Counts for 89 + * everything should still be the same regardless, as 90 + * ops.disable() is invoked even if a task is still on 91 + * SCHED_EXT before it exits. 
92 + */ 93 + if (i % 2 == 0) { 94 + ret = sched_setscheduler(0, SCHED_OTHER, &param); 95 + SCX_BUG_ON(ret, "Failed to reset sched to normal"); 96 + } 97 + exit(0); 98 + } 99 + } 100 + for (i = 0; i < num_children; i++) { 101 + SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i], 102 + "Failed to wait for SCX child\n"); 103 + 104 + SCX_FAIL_IF(status != 0, "SCX child %d exited with status %d\n", i, 105 + status); 106 + } 107 + 108 + /* SCHED_OTHER children */ 109 + for (i = 0; i < num_children; i++) { 110 + pids[i] = fork(); 111 + if (pids[i] == 0) 112 + exit(0); 113 + } 114 + 115 + for (i = 0; i < num_children; i++) { 116 + SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i], 117 + "Failed to wait for normal child\n"); 118 + 119 + SCX_FAIL_IF(status != 0, "Normal child %d exited with status %d\n", i, 120 + status); 121 + } 122 + 123 + bpf_link__destroy(link); 124 + 125 + SCX_GE(skel->bss->init_task_cnt, 2 * num_children); 126 + SCX_GE(skel->bss->exit_task_cnt, 2 * num_children); 127 + 128 + if (global) { 129 + SCX_GE(skel->bss->enable_cnt, 2 * num_children); 130 + SCX_GE(skel->bss->disable_cnt, 2 * num_children); 131 + } else { 132 + SCX_EQ(skel->bss->enable_cnt, num_children); 133 + SCX_EQ(skel->bss->disable_cnt, num_children); 134 + } 135 + /* 136 + * We forked a ton of tasks before we attached the scheduler above, so 137 + * this should be fine. Technically it could be flaky if a ton of forks 138 + * are happening at the same time in other processes, but that should 139 + * be exceedingly unlikely. 
140 + */ 141 + SCX_GT(skel->bss->init_transition_cnt, skel->bss->init_fork_cnt); 142 + SCX_GE(skel->bss->init_fork_cnt, 2 * num_children); 143 + 144 + init_enable_count__destroy(skel); 145 + 146 + return SCX_TEST_PASS; 147 + } 148 + 149 + static enum scx_test_status run(void *ctx) 150 + { 151 + enum scx_test_status status; 152 + 153 + status = run_test(true); 154 + if (status != SCX_TEST_PASS) 155 + return status; 156 + 157 + return run_test(false); 158 + } 159 + 160 + struct scx_test init_enable_count = { 161 + .name = "init_enable_count", 162 + .description = "Verify we do the correct amount of counting of init, " 163 + "enable, etc callbacks.", 164 + .run = run, 165 + }; 166 + REGISTER_SCX_TEST(&init_enable_count)
+132
tools/testing/selftests/sched_ext/maximal.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A scheduler with every callback defined. 4 + * 5 + * This scheduler defines every callback. 6 + * 7 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 8 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 9 + */ 10 + 11 + #include <scx/common.bpf.h> 12 + 13 + char _license[] SEC("license") = "GPL"; 14 + 15 + s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu, 16 + u64 wake_flags) 17 + { 18 + return prev_cpu; 19 + } 20 + 21 + void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags) 22 + { 23 + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); 24 + } 25 + 26 + void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags) 27 + {} 28 + 29 + void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev) 30 + { 31 + scx_bpf_consume(SCX_DSQ_GLOBAL); 32 + } 33 + 34 + void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags) 35 + {} 36 + 37 + void BPF_STRUCT_OPS(maximal_running, struct task_struct *p) 38 + {} 39 + 40 + void BPF_STRUCT_OPS(maximal_stopping, struct task_struct *p, bool runnable) 41 + {} 42 + 43 + void BPF_STRUCT_OPS(maximal_quiescent, struct task_struct *p, u64 deq_flags) 44 + {} 45 + 46 + bool BPF_STRUCT_OPS(maximal_yield, struct task_struct *from, 47 + struct task_struct *to) 48 + { 49 + return false; 50 + } 51 + 52 + bool BPF_STRUCT_OPS(maximal_core_sched_before, struct task_struct *a, 53 + struct task_struct *b) 54 + { 55 + return false; 56 + } 57 + 58 + void BPF_STRUCT_OPS(maximal_set_weight, struct task_struct *p, u32 weight) 59 + {} 60 + 61 + void BPF_STRUCT_OPS(maximal_set_cpumask, struct task_struct *p, 62 + const struct cpumask *cpumask) 63 + {} 64 + 65 + void BPF_STRUCT_OPS(maximal_update_idle, s32 cpu, bool idle) 66 + {} 67 + 68 + void BPF_STRUCT_OPS(maximal_cpu_acquire, s32 cpu, 69 + struct scx_cpu_acquire_args *args) 70 + {} 71 + 72 + void BPF_STRUCT_OPS(maximal_cpu_release, s32 
cpu, 73 + struct scx_cpu_release_args *args) 74 + {} 75 + 76 + void BPF_STRUCT_OPS(maximal_cpu_online, s32 cpu) 77 + {} 78 + 79 + void BPF_STRUCT_OPS(maximal_cpu_offline, s32 cpu) 80 + {} 81 + 82 + s32 BPF_STRUCT_OPS(maximal_init_task, struct task_struct *p, 83 + struct scx_init_task_args *args) 84 + { 85 + return 0; 86 + } 87 + 88 + void BPF_STRUCT_OPS(maximal_enable, struct task_struct *p) 89 + {} 90 + 91 + void BPF_STRUCT_OPS(maximal_exit_task, struct task_struct *p, 92 + struct scx_exit_task_args *args) 93 + {} 94 + 95 + void BPF_STRUCT_OPS(maximal_disable, struct task_struct *p) 96 + {} 97 + 98 + s32 BPF_STRUCT_OPS_SLEEPABLE(maximal_init) 99 + { 100 + return 0; 101 + } 102 + 103 + void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info) 104 + {} 105 + 106 + SEC(".struct_ops.link") 107 + struct sched_ext_ops maximal_ops = { 108 + .select_cpu = maximal_select_cpu, 109 + .enqueue = maximal_enqueue, 110 + .dequeue = maximal_dequeue, 111 + .dispatch = maximal_dispatch, 112 + .runnable = maximal_runnable, 113 + .running = maximal_running, 114 + .stopping = maximal_stopping, 115 + .quiescent = maximal_quiescent, 116 + .yield = maximal_yield, 117 + .core_sched_before = maximal_core_sched_before, 118 + .set_weight = maximal_set_weight, 119 + .set_cpumask = maximal_set_cpumask, 120 + .update_idle = maximal_update_idle, 121 + .cpu_acquire = maximal_cpu_acquire, 122 + .cpu_release = maximal_cpu_release, 123 + .cpu_online = maximal_cpu_online, 124 + .cpu_offline = maximal_cpu_offline, 125 + .init_task = maximal_init_task, 126 + .enable = maximal_enable, 127 + .exit_task = maximal_exit_task, 128 + .disable = maximal_disable, 129 + .init = maximal_init, 130 + .exit = maximal_exit, 131 + .name = "maximal", 132 + };
+51
tools/testing/selftests/sched_ext/maximal.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + */ 6 + #include <bpf/bpf.h> 7 + #include <scx/common.h> 8 + #include <sys/wait.h> 9 + #include <unistd.h> 10 + #include "maximal.bpf.skel.h" 11 + #include "scx_test.h" 12 + 13 + static enum scx_test_status setup(void **ctx) 14 + { 15 + struct maximal *skel; 16 + 17 + skel = maximal__open_and_load(); 18 + SCX_FAIL_IF(!skel, "Failed to open and load skel"); 19 + *ctx = skel; 20 + 21 + return SCX_TEST_PASS; 22 + } 23 + 24 + static enum scx_test_status run(void *ctx) 25 + { 26 + struct maximal *skel = ctx; 27 + struct bpf_link *link; 28 + 29 + link = bpf_map__attach_struct_ops(skel->maps.maximal_ops); 30 + SCX_FAIL_IF(!link, "Failed to attach scheduler"); 31 + 32 + bpf_link__destroy(link); 33 + 34 + return SCX_TEST_PASS; 35 + } 36 + 37 + static void cleanup(void *ctx) 38 + { 39 + struct maximal *skel = ctx; 40 + 41 + maximal__destroy(skel); 42 + } 43 + 44 + struct scx_test maximal = { 45 + .name = "maximal", 46 + .description = "Verify we can load a scheduler with every callback defined", 47 + .setup = setup, 48 + .run = run, 49 + .cleanup = cleanup, 50 + }; 51 + REGISTER_SCX_TEST(&maximal)
+36
tools/testing/selftests/sched_ext/maybe_null.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + */ 5 + 6 + #include <scx/common.bpf.h> 7 + 8 + char _license[] SEC("license") = "GPL"; 9 + 10 + u64 vtime_test; 11 + 12 + void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p) 13 + {} 14 + 15 + void BPF_STRUCT_OPS(maybe_null_success_dispatch, s32 cpu, struct task_struct *p) 16 + { 17 + if (p != NULL) 18 + vtime_test = p->scx.dsq_vtime; 19 + } 20 + 21 + bool BPF_STRUCT_OPS(maybe_null_success_yield, struct task_struct *from, 22 + struct task_struct *to) 23 + { 24 + if (to) 25 + bpf_printk("Yielding to %s[%d]", to->comm, to->pid); 26 + 27 + return false; 28 + } 29 + 30 + SEC(".struct_ops.link") 31 + struct sched_ext_ops maybe_null_success = { 32 + .dispatch = maybe_null_success_dispatch, 33 + .yield = maybe_null_success_yield, 34 + .enable = maybe_null_running, 35 + .name = "minimal", 36 + };
+49
tools/testing/selftests/sched_ext/maybe_null.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + */ 5 + #include <bpf/bpf.h> 6 + #include <scx/common.h> 7 + #include <sys/wait.h> 8 + #include <unistd.h> 9 + #include "maybe_null.bpf.skel.h" 10 + #include "maybe_null_fail_dsp.bpf.skel.h" 11 + #include "maybe_null_fail_yld.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + static enum scx_test_status run(void *ctx) 15 + { 16 + struct maybe_null *skel; 17 + struct maybe_null_fail_dsp *fail_dsp; 18 + struct maybe_null_fail_yld *fail_yld; 19 + 20 + skel = maybe_null__open_and_load(); 21 + if (!skel) { 22 + SCX_ERR("Failed to open and load maybe_null skel"); 23 + return SCX_TEST_FAIL; 24 + } 25 + maybe_null__destroy(skel); 26 + 27 + fail_dsp = maybe_null_fail_dsp__open_and_load(); 28 + if (fail_dsp) { 29 + maybe_null_fail_dsp__destroy(fail_dsp); 30 + SCX_ERR("Should failed to open and load maybe_null_fail_dsp skel"); 31 + return SCX_TEST_FAIL; 32 + } 33 + 34 + fail_yld = maybe_null_fail_yld__open_and_load(); 35 + if (fail_yld) { 36 + maybe_null_fail_yld__destroy(fail_yld); 37 + SCX_ERR("Should failed to open and load maybe_null_fail_yld skel"); 38 + return SCX_TEST_FAIL; 39 + } 40 + 41 + return SCX_TEST_PASS; 42 + } 43 + 44 + struct scx_test maybe_null = { 45 + .name = "maybe_null", 46 + .description = "Verify if PTR_MAYBE_NULL work for .dispatch", 47 + .run = run, 48 + }; 49 + REGISTER_SCX_TEST(&maybe_null)
+25
tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + */ 5 + 6 + #include <scx/common.bpf.h> 7 + 8 + char _license[] SEC("license") = "GPL"; 9 + 10 + u64 vtime_test; 11 + 12 + void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p) 13 + {} 14 + 15 + void BPF_STRUCT_OPS(maybe_null_fail_dispatch, s32 cpu, struct task_struct *p) 16 + { 17 + vtime_test = p->scx.dsq_vtime; 18 + } 19 + 20 + SEC(".struct_ops.link") 21 + struct sched_ext_ops maybe_null_fail = { 22 + .dispatch = maybe_null_fail_dispatch, 23 + .enable = maybe_null_running, 24 + .name = "maybe_null_fail_dispatch", 25 + };
+28
tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + */ 5 + 6 + #include <scx/common.bpf.h> 7 + 8 + char _license[] SEC("license") = "GPL"; 9 + 10 + u64 vtime_test; 11 + 12 + void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p) 13 + {} 14 + 15 + bool BPF_STRUCT_OPS(maybe_null_fail_yield, struct task_struct *from, 16 + struct task_struct *to) 17 + { 18 + bpf_printk("Yielding to %s[%d]", to->comm, to->pid); 19 + 20 + return false; 21 + } 22 + 23 + SEC(".struct_ops.link") 24 + struct sched_ext_ops maybe_null_fail = { 25 + .yield = maybe_null_fail_yield, 26 + .enable = maybe_null_running, 27 + .name = "maybe_null_fail_yield", 28 + };
+21
tools/testing/selftests/sched_ext/minimal.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A completely minimal scheduler. 4 + * 5 + * This scheduler defines the absolute minimal set of struct sched_ext_ops 6 + * fields: its name. It should _not_ fail to be loaded, and can be used to 7 + * exercise the default scheduling paths in ext.c. 8 + * 9 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 10 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 11 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 12 + */ 13 + 14 + #include <scx/common.bpf.h> 15 + 16 + char _license[] SEC("license") = "GPL"; 17 + 18 + SEC(".struct_ops.link") 19 + struct sched_ext_ops minimal_ops = { 20 + .name = "minimal", 21 + };
+58
tools/testing/selftests/sched_ext/minimal.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <bpf/bpf.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "minimal.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + static enum scx_test_status setup(void **ctx) 15 + { 16 + struct minimal *skel; 17 + 18 + skel = minimal__open_and_load(); 19 + if (!skel) { 20 + SCX_ERR("Failed to open and load skel"); 21 + return SCX_TEST_FAIL; 22 + } 23 + *ctx = skel; 24 + 25 + return SCX_TEST_PASS; 26 + } 27 + 28 + static enum scx_test_status run(void *ctx) 29 + { 30 + struct minimal *skel = ctx; 31 + struct bpf_link *link; 32 + 33 + link = bpf_map__attach_struct_ops(skel->maps.minimal_ops); 34 + if (!link) { 35 + SCX_ERR("Failed to attach scheduler"); 36 + return SCX_TEST_FAIL; 37 + } 38 + 39 + bpf_link__destroy(link); 40 + 41 + return SCX_TEST_PASS; 42 + } 43 + 44 + static void cleanup(void *ctx) 45 + { 46 + struct minimal *skel = ctx; 47 + 48 + minimal__destroy(skel); 49 + } 50 + 51 + struct scx_test minimal = { 52 + .name = "minimal", 53 + .description = "Verify we can load a fully minimal scheduler", 54 + .setup = setup, 55 + .run = run, 56 + .cleanup = cleanup, 57 + }; 58 + REGISTER_SCX_TEST(&minimal)
+32
tools/testing/selftests/sched_ext/prog_run.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A scheduler that validates that we can invoke sched_ext kfuncs in 4 + * BPF_PROG_TYPE_SYSCALL programs. 5 + * 6 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 7 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 8 + */ 9 + 10 + #include <scx/common.bpf.h> 11 + 12 + UEI_DEFINE(uei); 13 + 14 + char _license[] SEC("license") = "GPL"; 15 + 16 + SEC("syscall") 17 + int BPF_PROG(prog_run_syscall) 18 + { 19 + scx_bpf_exit(0xdeadbeef, "Exited from PROG_RUN"); 20 + return 0; 21 + } 22 + 23 + void BPF_STRUCT_OPS(prog_run_exit, struct scx_exit_info *ei) 24 + { 25 + UEI_RECORD(uei, ei); 26 + } 27 + 28 + SEC(".struct_ops.link") 29 + struct sched_ext_ops prog_run_ops = { 30 + .exit = prog_run_exit, 31 + .name = "prog_run", 32 + };
+78
tools/testing/selftests/sched_ext/prog_run.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + */ 6 + #include <bpf/bpf.h> 7 + #include <sched.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "prog_run.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + static enum scx_test_status setup(void **ctx) 15 + { 16 + struct prog_run *skel; 17 + 18 + skel = prog_run__open_and_load(); 19 + if (!skel) { 20 + SCX_ERR("Failed to open and load skel"); 21 + return SCX_TEST_FAIL; 22 + } 23 + *ctx = skel; 24 + 25 + return SCX_TEST_PASS; 26 + } 27 + 28 + static enum scx_test_status run(void *ctx) 29 + { 30 + struct prog_run *skel = ctx; 31 + struct bpf_link *link; 32 + int prog_fd, err = 0; 33 + 34 + prog_fd = bpf_program__fd(skel->progs.prog_run_syscall); 35 + if (prog_fd < 0) { 36 + SCX_ERR("Failed to get BPF_PROG_RUN prog"); 37 + return SCX_TEST_FAIL; 38 + } 39 + 40 + LIBBPF_OPTS(bpf_test_run_opts, topts); 41 + 42 + link = bpf_map__attach_struct_ops(skel->maps.prog_run_ops); 43 + if (!link) { 44 + SCX_ERR("Failed to attach scheduler"); 45 + close(prog_fd); 46 + return SCX_TEST_FAIL; 47 + } 48 + 49 + err = bpf_prog_test_run_opts(prog_fd, &topts); 50 + SCX_EQ(err, 0); 51 + 52 + /* Assumes uei.kind is written last */ 53 + while (skel->data->uei.kind == EXIT_KIND(SCX_EXIT_NONE)) 54 + sched_yield(); 55 + 56 + SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_UNREG_BPF)); 57 + SCX_EQ(skel->data->uei.exit_code, 0xdeadbeef); 58 + close(prog_fd); 59 + bpf_link__destroy(link); 60 + 61 + return SCX_TEST_PASS; 62 + } 63 + 64 + static void cleanup(void *ctx) 65 + { 66 + struct prog_run *skel = ctx; 67 + 68 + prog_run__destroy(skel); 69 + } 70 + 71 + struct scx_test prog_run = { 72 + .name = "prog_run", 73 + .description = "Verify we can call into a scheduler with BPF_PROG_RUN, and invoke kfuncs", 74 + .setup = setup, 75 + .run = run, 76 + .cleanup = cleanup, 77 + }; 78 
+ REGISTER_SCX_TEST(&prog_run)
+75
tools/testing/selftests/sched_ext/reload_loop.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + */ 6 + #include <bpf/bpf.h> 7 + #include <pthread.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "maximal.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + static struct maximal *skel; 15 + static pthread_t threads[2]; 16 + 17 + bool force_exit = false; 18 + 19 + static enum scx_test_status setup(void **ctx) 20 + { 21 + skel = maximal__open_and_load(); 22 + if (!skel) { 23 + SCX_ERR("Failed to open and load skel"); 24 + return SCX_TEST_FAIL; 25 + } 26 + 27 + return SCX_TEST_PASS; 28 + } 29 + 30 + static void *do_reload_loop(void *arg) 31 + { 32 + u32 i; 33 + 34 + for (i = 0; i < 1024 && !force_exit; i++) { 35 + struct bpf_link *link; 36 + 37 + link = bpf_map__attach_struct_ops(skel->maps.maximal_ops); 38 + if (link) 39 + bpf_link__destroy(link); 40 + } 41 + 42 + return NULL; 43 + } 44 + 45 + static enum scx_test_status run(void *ctx) 46 + { 47 + int err; 48 + void *ret; 49 + 50 + err = pthread_create(&threads[0], NULL, do_reload_loop, NULL); 51 + SCX_FAIL_IF(err, "Failed to create thread 0"); 52 + 53 + err = pthread_create(&threads[1], NULL, do_reload_loop, NULL); 54 + SCX_FAIL_IF(err, "Failed to create thread 1"); 55 + 56 + SCX_FAIL_IF(pthread_join(threads[0], &ret), "thread 0 failed"); 57 + SCX_FAIL_IF(pthread_join(threads[1], &ret), "thread 1 failed"); 58 + 59 + return SCX_TEST_PASS; 60 + } 61 + 62 + static void cleanup(void *ctx) 63 + { 64 + force_exit = true; 65 + maximal__destroy(skel); 66 + } 67 + 68 + struct scx_test reload_loop = { 69 + .name = "reload_loop", 70 + .description = "Stress test loading and unloading schedulers repeatedly in a tight loop", 71 + .setup = setup, 72 + .run = run, 73 + .cleanup = cleanup, 74 + }; 75 + REGISTER_SCX_TEST(&reload_loop)
+201
tools/testing/selftests/sched_ext/runner.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2024 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <stdio.h> 8 + #include <unistd.h> 9 + #include <signal.h> 10 + #include <libgen.h> 11 + #include <bpf/bpf.h> 12 + #include "scx_test.h" 13 + 14 + const char help_fmt[] = 15 + "The runner for sched_ext tests.\n" 16 + "\n" 17 + "The runner is statically linked against all testcases, and runs them all serially.\n" 18 + "It's required for the testcases to be serial, as only a single host-wide sched_ext\n" 19 + "scheduler may be loaded at any given time." 20 + "\n" 21 + "Usage: %s [-t TEST] [-h]\n" 22 + "\n" 23 + " -t TEST Only run tests whose name includes this string\n" 24 + " -s Include print output for skipped tests\n" 25 + " -q Don't print the test descriptions during run\n" 26 + " -h Display this help and exit\n"; 27 + 28 + static volatile int exit_req; 29 + static bool quiet, print_skipped; 30 + 31 + #define MAX_SCX_TESTS 2048 32 + 33 + static struct scx_test __scx_tests[MAX_SCX_TESTS]; 34 + static unsigned __scx_num_tests = 0; 35 + 36 + static void sigint_handler(int simple) 37 + { 38 + exit_req = 1; 39 + } 40 + 41 + static void print_test_preamble(const struct scx_test *test, bool quiet) 42 + { 43 + printf("===== START =====\n"); 44 + printf("TEST: %s\n", test->name); 45 + if (!quiet) 46 + printf("DESCRIPTION: %s\n", test->description); 47 + printf("OUTPUT:\n"); 48 + } 49 + 50 + static const char *status_to_result(enum scx_test_status status) 51 + { 52 + switch (status) { 53 + case SCX_TEST_PASS: 54 + case SCX_TEST_SKIP: 55 + return "ok"; 56 + case SCX_TEST_FAIL: 57 + return "not ok"; 58 + default: 59 + return "<UNKNOWN>"; 60 + } 61 + } 62 + 63 + static void print_test_result(const struct scx_test *test, 64 + enum scx_test_status status, 65 + unsigned int testnum) 66 + { 67 + const char *result = status_to_result(status); 
68 + const char *directive = status == SCX_TEST_SKIP ? "SKIP " : ""; 69 + 70 + printf("%s %u %s # %s\n", result, testnum, test->name, directive); 71 + printf("===== END =====\n"); 72 + } 73 + 74 + static bool should_skip_test(const struct scx_test *test, const char * filter) 75 + { 76 + return !strstr(test->name, filter); 77 + } 78 + 79 + static enum scx_test_status run_test(const struct scx_test *test) 80 + { 81 + enum scx_test_status status; 82 + void *context = NULL; 83 + 84 + if (test->setup) { 85 + status = test->setup(&context); 86 + if (status != SCX_TEST_PASS) 87 + return status; 88 + } 89 + 90 + status = test->run(context); 91 + 92 + if (test->cleanup) 93 + test->cleanup(context); 94 + 95 + return status; 96 + } 97 + 98 + static bool test_valid(const struct scx_test *test) 99 + { 100 + if (!test) { 101 + fprintf(stderr, "NULL test detected\n"); 102 + return false; 103 + } 104 + 105 + if (!test->name) { 106 + fprintf(stderr, 107 + "Test with no name found. Must specify test name.\n"); 108 + return false; 109 + } 110 + 111 + if (!test->description) { 112 + fprintf(stderr, "Test %s requires description.\n", test->name); 113 + return false; 114 + } 115 + 116 + if (!test->run) { 117 + fprintf(stderr, "Test %s has no run() callback\n", test->name); 118 + return false; 119 + } 120 + 121 + return true; 122 + } 123 + 124 + int main(int argc, char **argv) 125 + { 126 + const char *filter = NULL; 127 + unsigned testnum = 0, i; 128 + unsigned passed = 0, skipped = 0, failed = 0; 129 + int opt; 130 + 131 + signal(SIGINT, sigint_handler); 132 + signal(SIGTERM, sigint_handler); 133 + 134 + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 135 + 136 + while ((opt = getopt(argc, argv, "qst:h")) != -1) { 137 + switch (opt) { 138 + case 'q': 139 + quiet = true; 140 + break; 141 + case 's': 142 + print_skipped = true; 143 + break; 144 + case 't': 145 + filter = optarg; 146 + break; 147 + default: 148 + fprintf(stderr, help_fmt, basename(argv[0])); 149 + return opt != 'h'; 150 + } 
151 + } 152 + 153 + for (i = 0; i < __scx_num_tests; i++) { 154 + enum scx_test_status status; 155 + struct scx_test *test = &__scx_tests[i]; 156 + 157 + if (filter && should_skip_test(test, filter)) { 158 + /* 159 + * Printing the skipped tests and their preambles can 160 + * add a lot of noise to the runner output. Printing 161 + * this is only really useful for CI, so let's skip it 162 + * by default. 163 + */ 164 + if (print_skipped) { 165 + print_test_preamble(test, quiet); 166 + print_test_result(test, SCX_TEST_SKIP, ++testnum); 167 + } 168 + continue; 169 + } 170 + 171 + print_test_preamble(test, quiet); 172 + status = run_test(test); 173 + print_test_result(test, status, ++testnum); 174 + switch (status) { 175 + case SCX_TEST_PASS: 176 + passed++; 177 + break; 178 + case SCX_TEST_SKIP: 179 + skipped++; 180 + break; 181 + case SCX_TEST_FAIL: 182 + failed++; 183 + break; 184 + } 185 + } 186 + printf("\n\n=============================\n\n"); 187 + printf("RESULTS:\n\n"); 188 + printf("PASSED: %u\n", passed); 189 + printf("SKIPPED: %u\n", skipped); 190 + printf("FAILED: %u\n", failed); 191 + 192 + return 0; 193 + } 194 + 195 + void scx_test_register(struct scx_test *test) 196 + { 197 + SCX_BUG_ON(!test_valid(test), "Invalid test found"); 198 + SCX_BUG_ON(__scx_num_tests >= MAX_SCX_TESTS, "Maximum tests exceeded"); 199 + 200 + __scx_tests[__scx_num_tests++] = *test; 201 + }
+131
tools/testing/selftests/sched_ext/scx_test.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 5 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 6 + */ 7 + 8 + #ifndef __SCX_TEST_H__ 9 + #define __SCX_TEST_H__ 10 + 11 + #include <errno.h> 12 + #include <scx/common.h> 13 + #include <scx/compat.h> 14 + 15 + enum scx_test_status { 16 + SCX_TEST_PASS = 0, 17 + SCX_TEST_SKIP, 18 + SCX_TEST_FAIL, 19 + }; 20 + 21 + #define EXIT_KIND(__ent) __COMPAT_ENUM_OR_ZERO("scx_exit_kind", #__ent) 22 + 23 + struct scx_test { 24 + /** 25 + * name - The name of the testcase. 26 + */ 27 + const char *name; 28 + 29 + /** 30 + * description - A description of your testcase: what it tests and is 31 + * meant to validate. 32 + */ 33 + const char *description; 34 + 35 + /* 36 + * setup - Setup the test. 37 + * @ctx: A pointer to a context object that will be passed to run and 38 + * cleanup. 39 + * 40 + * An optional callback that allows a testcase to perform setup for its 41 + * run. A test may return SCX_TEST_SKIP to skip the run. 42 + */ 43 + enum scx_test_status (*setup)(void **ctx); 44 + 45 + /* 46 + * run - Run the test. 47 + * @ctx: Context set in the setup() callback. If @ctx was not set in 48 + * setup(), it is NULL. 49 + * 50 + * The main test. Callers should return one of: 51 + * 52 + * - SCX_TEST_PASS: Test passed 53 + * - SCX_TEST_SKIP: Test should be skipped 54 + * - SCX_TEST_FAIL: Test failed 55 + * 56 + * This callback must be defined. 57 + */ 58 + enum scx_test_status (*run)(void *ctx); 59 + 60 + /* 61 + * cleanup - Perform cleanup following the test 62 + * @ctx: Context set in the setup() callback. If @ctx was not set in 63 + * setup(), it is NULL. 64 + * 65 + * An optional callback that allows a test to perform cleanup after 66 + * being run. This callback is run even if the run() callback returns 67 + * SCX_TEST_SKIP or SCX_TEST_FAIL. 
It is not run if setup() returns 68 + * SCX_TEST_SKIP or SCX_TEST_FAIL. 69 + */ 70 + void (*cleanup)(void *ctx); 71 + }; 72 + 73 + void scx_test_register(struct scx_test *test); 74 + 75 + #define REGISTER_SCX_TEST(__test) \ 76 + __attribute__((constructor)) \ 77 + static void ___scxregister##__LINE__(void) \ 78 + { \ 79 + scx_test_register(__test); \ 80 + } 81 + 82 + #define SCX_ERR(__fmt, ...) \ 83 + do { \ 84 + fprintf(stderr, "ERR: %s:%d\n", __FILE__, __LINE__); \ 85 + fprintf(stderr, __fmt"\n", ##__VA_ARGS__); \ 86 + } while (0) 87 + 88 + #define SCX_FAIL(__fmt, ...) \ 89 + do { \ 90 + SCX_ERR(__fmt, ##__VA_ARGS__); \ 91 + return SCX_TEST_FAIL; \ 92 + } while (0) 93 + 94 + #define SCX_FAIL_IF(__cond, __fmt, ...) \ 95 + do { \ 96 + if (__cond) \ 97 + SCX_FAIL(__fmt, ##__VA_ARGS__); \ 98 + } while (0) 99 + 100 + #define SCX_GT(_x, _y) SCX_FAIL_IF((_x) <= (_y), "Expected %s > %s (%lu > %lu)", \ 101 + #_x, #_y, (u64)(_x), (u64)(_y)) 102 + #define SCX_GE(_x, _y) SCX_FAIL_IF((_x) < (_y), "Expected %s >= %s (%lu >= %lu)", \ 103 + #_x, #_y, (u64)(_x), (u64)(_y)) 104 + #define SCX_LT(_x, _y) SCX_FAIL_IF((_x) >= (_y), "Expected %s < %s (%lu < %lu)", \ 105 + #_x, #_y, (u64)(_x), (u64)(_y)) 106 + #define SCX_LE(_x, _y) SCX_FAIL_IF((_x) > (_y), "Expected %s <= %s (%lu <= %lu)", \ 107 + #_x, #_y, (u64)(_x), (u64)(_y)) 108 + #define SCX_EQ(_x, _y) SCX_FAIL_IF((_x) != (_y), "Expected %s == %s (%lu == %lu)", \ 109 + #_x, #_y, (u64)(_x), (u64)(_y)) 110 + #define SCX_ASSERT(_x) SCX_FAIL_IF(!(_x), "Expected %s to be true (%lu)", \ 111 + #_x, (u64)(_x)) 112 + 113 + #define SCX_ECODE_VAL(__ecode) ({ \ 114 + u64 __val = 0; \ 115 + bool __found = false; \ 116 + \ 117 + __found = __COMPAT_read_enum("scx_exit_code", #__ecode, &__val); \ 118 + SCX_ASSERT(__found); \ 119 + (s64)__val; \ 120 + }) 121 + 122 + #define SCX_KIND_VAL(__kind) ({ \ 123 + u64 __val = 0; \ 124 + bool __found = false; \ 125 + \ 126 + __found = __COMPAT_read_enum("scx_exit_kind", #__kind, &__val); \ 127 + 
SCX_ASSERT(__found); \ 128 + __val; \ 129 + }) 130 + 131 + #endif // # __SCX_TEST_H__
+40
tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A scheduler that validates the behavior of direct dispatching with a default 4 + * select_cpu implementation. 5 + * 6 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 7 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 8 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 9 + */ 10 + 11 + #include <scx/common.bpf.h> 12 + 13 + char _license[] SEC("license") = "GPL"; 14 + 15 + bool saw_local = false; 16 + 17 + static bool task_is_test(const struct task_struct *p) 18 + { 19 + return !bpf_strncmp(p->comm, 9, "select_cpu"); 20 + } 21 + 22 + void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p, 23 + u64 enq_flags) 24 + { 25 + const struct cpumask *idle_mask = scx_bpf_get_idle_cpumask(); 26 + 27 + if (task_is_test(p) && 28 + bpf_cpumask_test_cpu(scx_bpf_task_cpu(p), idle_mask)) { 29 + saw_local = true; 30 + } 31 + scx_bpf_put_idle_cpumask(idle_mask); 32 + 33 + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); 34 + } 35 + 36 + SEC(".struct_ops.link") 37 + struct sched_ext_ops select_cpu_dfl_ops = { 38 + .enqueue = select_cpu_dfl_enqueue, 39 + .name = "select_cpu_dfl", 40 + };
+72
tools/testing/selftests/sched_ext/select_cpu_dfl.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <bpf/bpf.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "select_cpu_dfl.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + #define NUM_CHILDREN 1028 15 + 16 + static enum scx_test_status setup(void **ctx) 17 + { 18 + struct select_cpu_dfl *skel; 19 + 20 + skel = select_cpu_dfl__open_and_load(); 21 + SCX_FAIL_IF(!skel, "Failed to open and load skel"); 22 + *ctx = skel; 23 + 24 + return SCX_TEST_PASS; 25 + } 26 + 27 + static enum scx_test_status run(void *ctx) 28 + { 29 + struct select_cpu_dfl *skel = ctx; 30 + struct bpf_link *link; 31 + pid_t pids[NUM_CHILDREN]; 32 + int i, status; 33 + 34 + link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dfl_ops); 35 + SCX_FAIL_IF(!link, "Failed to attach scheduler"); 36 + 37 + for (i = 0; i < NUM_CHILDREN; i++) { 38 + pids[i] = fork(); 39 + if (pids[i] == 0) { 40 + sleep(1); 41 + exit(0); 42 + } 43 + } 44 + 45 + for (i = 0; i < NUM_CHILDREN; i++) { 46 + SCX_EQ(waitpid(pids[i], &status, 0), pids[i]); 47 + SCX_EQ(status, 0); 48 + } 49 + 50 + SCX_ASSERT(!skel->bss->saw_local); 51 + 52 + bpf_link__destroy(link); 53 + 54 + return SCX_TEST_PASS; 55 + } 56 + 57 + static void cleanup(void *ctx) 58 + { 59 + struct select_cpu_dfl *skel = ctx; 60 + 61 + select_cpu_dfl__destroy(skel); 62 + } 63 + 64 + struct scx_test select_cpu_dfl = { 65 + .name = "select_cpu_dfl", 66 + .description = "Verify the default ops.select_cpu() dispatches tasks " 67 + "when idles cores are found, and skips ops.enqueue()", 68 + .setup = setup, 69 + .run = run, 70 + .cleanup = cleanup, 71 + }; 72 + REGISTER_SCX_TEST(&select_cpu_dfl)
+89
tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A scheduler that validates the behavior of direct dispatching with a default 4 + * select_cpu implementation, and with the SCX_OPS_ENQ_DFL_NO_DISPATCH ops flag 5 + * specified. 6 + * 7 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 8 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 9 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 10 + */ 11 + 12 + #include <scx/common.bpf.h> 13 + 14 + char _license[] SEC("license") = "GPL"; 15 + 16 + bool saw_local = false; 17 + 18 + /* Per-task scheduling context */ 19 + struct task_ctx { 20 + bool force_local; /* CPU changed by ops.select_cpu() */ 21 + }; 22 + 23 + struct { 24 + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); 25 + __uint(map_flags, BPF_F_NO_PREALLOC); 26 + __type(key, int); 27 + __type(value, struct task_ctx); 28 + } task_ctx_stor SEC(".maps"); 29 + 30 + /* Manually specify the signature until the kfunc is added to the scx repo. */ 31 + s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, 32 + bool *found) __ksym; 33 + 34 + s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_select_cpu, struct task_struct *p, 35 + s32 prev_cpu, u64 wake_flags) 36 + { 37 + struct task_ctx *tctx; 38 + s32 cpu; 39 + 40 + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); 41 + if (!tctx) { 42 + scx_bpf_error("task_ctx lookup failed"); 43 + return -ESRCH; 44 + } 45 + 46 + cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, 47 + &tctx->force_local); 48 + 49 + return cpu; 50 + } 51 + 52 + void BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_enqueue, struct task_struct *p, 53 + u64 enq_flags) 54 + { 55 + u64 dsq_id = SCX_DSQ_GLOBAL; 56 + struct task_ctx *tctx; 57 + 58 + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); 59 + if (!tctx) { 60 + scx_bpf_error("task_ctx lookup failed"); 61 + return; 62 + } 63 + 64 + if (tctx->force_local) { 65 + dsq_id = SCX_DSQ_LOCAL; 66 + tctx->force_local = false; 67 + saw_local = true; 68 + } 69 + 70 + 
scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags); 71 + } 72 + 73 + s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task, 74 + struct task_struct *p, struct scx_init_task_args *args) 75 + { 76 + if (bpf_task_storage_get(&task_ctx_stor, p, 0, 77 + BPF_LOCAL_STORAGE_GET_F_CREATE)) 78 + return 0; 79 + else 80 + return -ENOMEM; 81 + } 82 + 83 + SEC(".struct_ops.link") 84 + struct sched_ext_ops select_cpu_dfl_nodispatch_ops = { 85 + .select_cpu = select_cpu_dfl_nodispatch_select_cpu, 86 + .enqueue = select_cpu_dfl_nodispatch_enqueue, 87 + .init_task = select_cpu_dfl_nodispatch_init_task, 88 + .name = "select_cpu_dfl_nodispatch", 89 + };
+72
tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <bpf/bpf.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "select_cpu_dfl_nodispatch.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + #define NUM_CHILDREN 1028 15 + 16 + static enum scx_test_status setup(void **ctx) 17 + { 18 + struct select_cpu_dfl_nodispatch *skel; 19 + 20 + skel = select_cpu_dfl_nodispatch__open_and_load(); 21 + SCX_FAIL_IF(!skel, "Failed to open and load skel"); 22 + *ctx = skel; 23 + 24 + return SCX_TEST_PASS; 25 + } 26 + 27 + static enum scx_test_status run(void *ctx) 28 + { 29 + struct select_cpu_dfl_nodispatch *skel = ctx; 30 + struct bpf_link *link; 31 + pid_t pids[NUM_CHILDREN]; 32 + int i, status; 33 + 34 + link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dfl_nodispatch_ops); 35 + SCX_FAIL_IF(!link, "Failed to attach scheduler"); 36 + 37 + for (i = 0; i < NUM_CHILDREN; i++) { 38 + pids[i] = fork(); 39 + if (pids[i] == 0) { 40 + sleep(1); 41 + exit(0); 42 + } 43 + } 44 + 45 + for (i = 0; i < NUM_CHILDREN; i++) { 46 + SCX_EQ(waitpid(pids[i], &status, 0), pids[i]); 47 + SCX_EQ(status, 0); 48 + } 49 + 50 + SCX_ASSERT(skel->bss->saw_local); 51 + 52 + bpf_link__destroy(link); 53 + 54 + return SCX_TEST_PASS; 55 + } 56 + 57 + static void cleanup(void *ctx) 58 + { 59 + struct select_cpu_dfl_nodispatch *skel = ctx; 60 + 61 + select_cpu_dfl_nodispatch__destroy(skel); 62 + } 63 + 64 + struct scx_test select_cpu_dfl_nodispatch = { 65 + .name = "select_cpu_dfl_nodispatch", 66 + .description = "Verify behavior of scx_bpf_select_cpu_dfl() in " 67 + "ops.select_cpu()", 68 + .setup = setup, 69 + .run = run, 70 + .cleanup = cleanup, 71 + }; 72 + REGISTER_SCX_TEST(&select_cpu_dfl_nodispatch)
+41
tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A scheduler that validates the behavior of direct dispatching with a default 4 + * select_cpu implementation. 5 + * 6 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 7 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 8 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 9 + */ 10 + 11 + #include <scx/common.bpf.h> 12 + 13 + char _license[] SEC("license") = "GPL"; 14 + 15 + s32 BPF_STRUCT_OPS(select_cpu_dispatch_select_cpu, struct task_struct *p, 16 + s32 prev_cpu, u64 wake_flags) 17 + { 18 + u64 dsq_id = SCX_DSQ_LOCAL; 19 + s32 cpu = prev_cpu; 20 + 21 + if (scx_bpf_test_and_clear_cpu_idle(cpu)) 22 + goto dispatch; 23 + 24 + cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); 25 + if (cpu >= 0) 26 + goto dispatch; 27 + 28 + dsq_id = SCX_DSQ_GLOBAL; 29 + cpu = prev_cpu; 30 + 31 + dispatch: 32 + scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, 0); 33 + return cpu; 34 + } 35 + 36 + SEC(".struct_ops.link") 37 + struct sched_ext_ops select_cpu_dispatch_ops = { 38 + .select_cpu = select_cpu_dispatch_select_cpu, 39 + .name = "select_cpu_dispatch", 40 + .timeout_ms = 1000U, 41 + };
+70
tools/testing/selftests/sched_ext/select_cpu_dispatch.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <bpf/bpf.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "select_cpu_dispatch.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + #define NUM_CHILDREN 1028 15 + 16 + static enum scx_test_status setup(void **ctx) 17 + { 18 + struct select_cpu_dispatch *skel; 19 + 20 + skel = select_cpu_dispatch__open_and_load(); 21 + SCX_FAIL_IF(!skel, "Failed to open and load skel"); 22 + *ctx = skel; 23 + 24 + return SCX_TEST_PASS; 25 + } 26 + 27 + static enum scx_test_status run(void *ctx) 28 + { 29 + struct select_cpu_dispatch *skel = ctx; 30 + struct bpf_link *link; 31 + pid_t pids[NUM_CHILDREN]; 32 + int i, status; 33 + 34 + link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dispatch_ops); 35 + SCX_FAIL_IF(!link, "Failed to attach scheduler"); 36 + 37 + for (i = 0; i < NUM_CHILDREN; i++) { 38 + pids[i] = fork(); 39 + if (pids[i] == 0) { 40 + sleep(1); 41 + exit(0); 42 + } 43 + } 44 + 45 + for (i = 0; i < NUM_CHILDREN; i++) { 46 + SCX_EQ(waitpid(pids[i], &status, 0), pids[i]); 47 + SCX_EQ(status, 0); 48 + } 49 + 50 + bpf_link__destroy(link); 51 + 52 + return SCX_TEST_PASS; 53 + } 54 + 55 + static void cleanup(void *ctx) 56 + { 57 + struct select_cpu_dispatch *skel = ctx; 58 + 59 + select_cpu_dispatch__destroy(skel); 60 + } 61 + 62 + struct scx_test select_cpu_dispatch = { 63 + .name = "select_cpu_dispatch", 64 + .description = "Test direct dispatching to built-in DSQs from " 65 + "ops.select_cpu()", 66 + .setup = setup, 67 + .run = run, 68 + .cleanup = cleanup, 69 + }; 70 + REGISTER_SCX_TEST(&select_cpu_dispatch)
+37
tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A scheduler that validates the behavior of direct dispatching with a default 4 + * select_cpu implementation. 5 + * 6 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 7 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 8 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 9 + */ 10 + 11 + #include <scx/common.bpf.h> 12 + 13 + char _license[] SEC("license") = "GPL"; 14 + 15 + UEI_DEFINE(uei); 16 + 17 + s32 BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_select_cpu, struct task_struct *p, 18 + s32 prev_cpu, u64 wake_flags) 19 + { 20 + /* Dispatching to a random DSQ should fail. */ 21 + scx_bpf_dispatch(p, 0xcafef00d, SCX_SLICE_DFL, 0); 22 + 23 + return prev_cpu; 24 + } 25 + 26 + void BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_exit, struct scx_exit_info *ei) 27 + { 28 + UEI_RECORD(uei, ei); 29 + } 30 + 31 + SEC(".struct_ops.link") 32 + struct sched_ext_ops select_cpu_dispatch_bad_dsq_ops = { 33 + .select_cpu = select_cpu_dispatch_bad_dsq_select_cpu, 34 + .exit = select_cpu_dispatch_bad_dsq_exit, 35 + .name = "select_cpu_dispatch_bad_dsq", 36 + .timeout_ms = 1000U, 37 + };
+56
tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <bpf/bpf.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "select_cpu_dispatch_bad_dsq.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + static enum scx_test_status setup(void **ctx) 15 + { 16 + struct select_cpu_dispatch_bad_dsq *skel; 17 + 18 + skel = select_cpu_dispatch_bad_dsq__open_and_load(); 19 + SCX_FAIL_IF(!skel, "Failed to open and load skel"); 20 + *ctx = skel; 21 + 22 + return SCX_TEST_PASS; 23 + } 24 + 25 + static enum scx_test_status run(void *ctx) 26 + { 27 + struct select_cpu_dispatch_bad_dsq *skel = ctx; 28 + struct bpf_link *link; 29 + 30 + link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dispatch_bad_dsq_ops); 31 + SCX_FAIL_IF(!link, "Failed to attach scheduler"); 32 + 33 + sleep(1); 34 + 35 + SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR)); 36 + bpf_link__destroy(link); 37 + 38 + return SCX_TEST_PASS; 39 + } 40 + 41 + static void cleanup(void *ctx) 42 + { 43 + struct select_cpu_dispatch_bad_dsq *skel = ctx; 44 + 45 + select_cpu_dispatch_bad_dsq__destroy(skel); 46 + } 47 + 48 + struct scx_test select_cpu_dispatch_bad_dsq = { 49 + .name = "select_cpu_dispatch_bad_dsq", 50 + .description = "Verify graceful failure if we direct-dispatch to a " 51 + "bogus DSQ in ops.select_cpu()", 52 + .setup = setup, 53 + .run = run, 54 + .cleanup = cleanup, 55 + }; 56 + REGISTER_SCX_TEST(&select_cpu_dispatch_bad_dsq)
+38
tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A scheduler that validates the behavior of direct dispatching with a default 4 + * select_cpu implementation. 5 + * 6 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 7 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 8 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 9 + */ 10 + 11 + #include <scx/common.bpf.h> 12 + 13 + char _license[] SEC("license") = "GPL"; 14 + 15 + UEI_DEFINE(uei); 16 + 17 + s32 BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_select_cpu, struct task_struct *p, 18 + s32 prev_cpu, u64 wake_flags) 19 + { 20 + /* Dispatching twice in a row is disallowed. */ 21 + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); 22 + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); 23 + 24 + return prev_cpu; 25 + } 26 + 27 + void BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_exit, struct scx_exit_info *ei) 28 + { 29 + UEI_RECORD(uei, ei); 30 + } 31 + 32 + SEC(".struct_ops.link") 33 + struct sched_ext_ops select_cpu_dispatch_dbl_dsp_ops = { 34 + .select_cpu = select_cpu_dispatch_dbl_dsp_select_cpu, 35 + .exit = select_cpu_dispatch_dbl_dsp_exit, 36 + .name = "select_cpu_dispatch_dbl_dsp", 37 + .timeout_ms = 1000U, 38 + };
+56
tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2023 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2023 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <bpf/bpf.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "select_cpu_dispatch_dbl_dsp.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + static enum scx_test_status setup(void **ctx) 15 + { 16 + struct select_cpu_dispatch_dbl_dsp *skel; 17 + 18 + skel = select_cpu_dispatch_dbl_dsp__open_and_load(); 19 + SCX_FAIL_IF(!skel, "Failed to open and load skel"); 20 + *ctx = skel; 21 + 22 + return SCX_TEST_PASS; 23 + } 24 + 25 + static enum scx_test_status run(void *ctx) 26 + { 27 + struct select_cpu_dispatch_dbl_dsp *skel = ctx; 28 + struct bpf_link *link; 29 + 30 + link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dispatch_dbl_dsp_ops); 31 + SCX_FAIL_IF(!link, "Failed to attach scheduler"); 32 + 33 + sleep(1); 34 + 35 + SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR)); 36 + bpf_link__destroy(link); 37 + 38 + return SCX_TEST_PASS; 39 + } 40 + 41 + static void cleanup(void *ctx) 42 + { 43 + struct select_cpu_dispatch_dbl_dsp *skel = ctx; 44 + 45 + select_cpu_dispatch_dbl_dsp__destroy(skel); 46 + } 47 + 48 + struct scx_test select_cpu_dispatch_dbl_dsp = { 49 + .name = "select_cpu_dispatch_dbl_dsp", 50 + .description = "Verify graceful failure if we dispatch twice to a " 51 + "DSQ in ops.select_cpu()", 52 + .setup = setup, 53 + .run = run, 54 + .cleanup = cleanup, 55 + }; 56 + REGISTER_SCX_TEST(&select_cpu_dispatch_dbl_dsp)
+92
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * A scheduler that validates that enqueue flags are properly stored and 4 + * applied at dispatch time when a task is directly dispatched from 5 + * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and 6 + * making the test a very basic vtime scheduler. 7 + * 8 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 9 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 10 + * Copyright (c) 2024 Tejun Heo <tj@kernel.org> 11 + */ 12 + 13 + #include <scx/common.bpf.h> 14 + 15 + char _license[] SEC("license") = "GPL"; 16 + 17 + volatile bool consumed; 18 + 19 + static u64 vtime_now; 20 + 21 + #define VTIME_DSQ 0 22 + 23 + static inline bool vtime_before(u64 a, u64 b) 24 + { 25 + return (s64)(a - b) < 0; 26 + } 27 + 28 + static inline u64 task_vtime(const struct task_struct *p) 29 + { 30 + u64 vtime = p->scx.dsq_vtime; 31 + 32 + if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL)) 33 + return vtime_now - SCX_SLICE_DFL; 34 + else 35 + return vtime; 36 + } 37 + 38 + s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p, 39 + s32 prev_cpu, u64 wake_flags) 40 + { 41 + s32 cpu; 42 + 43 + cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); 44 + if (cpu >= 0) 45 + goto ddsp; 46 + 47 + cpu = prev_cpu; 48 + scx_bpf_test_and_clear_cpu_idle(cpu); 49 + ddsp: 50 + scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0); 51 + return cpu; 52 + } 53 + 54 + void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p) 55 + { 56 + if (scx_bpf_consume(VTIME_DSQ)) 57 + consumed = true; 58 + } 59 + 60 + void BPF_STRUCT_OPS(select_cpu_vtime_running, struct task_struct *p) 61 + { 62 + if (vtime_before(vtime_now, p->scx.dsq_vtime)) 63 + vtime_now = p->scx.dsq_vtime; 64 + } 65 + 66 + void BPF_STRUCT_OPS(select_cpu_vtime_stopping, struct task_struct *p, 67 + bool runnable) 68 + { 69 + p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight; 70 + } 71 + 72 
+ void BPF_STRUCT_OPS(select_cpu_vtime_enable, struct task_struct *p) 73 + { 74 + p->scx.dsq_vtime = vtime_now; 75 + } 76 + 77 + s32 BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init) 78 + { 79 + return scx_bpf_create_dsq(VTIME_DSQ, -1); 80 + } 81 + 82 + SEC(".struct_ops.link") 83 + struct sched_ext_ops select_cpu_vtime_ops = { 84 + .select_cpu = select_cpu_vtime_select_cpu, 85 + .dispatch = select_cpu_vtime_dispatch, 86 + .running = select_cpu_vtime_running, 87 + .stopping = select_cpu_vtime_stopping, 88 + .enable = select_cpu_vtime_enable, 89 + .init = select_cpu_vtime_init, 90 + .name = "select_cpu_vtime", 91 + .timeout_ms = 1000U, 92 + };
+59
tools/testing/selftests/sched_ext/select_cpu_vtime.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + * Copyright (c) 2024 Tejun Heo <tj@kernel.org> 6 + */ 7 + #include <bpf/bpf.h> 8 + #include <scx/common.h> 9 + #include <sys/wait.h> 10 + #include <unistd.h> 11 + #include "select_cpu_vtime.bpf.skel.h" 12 + #include "scx_test.h" 13 + 14 + static enum scx_test_status setup(void **ctx) 15 + { 16 + struct select_cpu_vtime *skel; 17 + 18 + skel = select_cpu_vtime__open_and_load(); 19 + SCX_FAIL_IF(!skel, "Failed to open and load skel"); 20 + *ctx = skel; 21 + 22 + return SCX_TEST_PASS; 23 + } 24 + 25 + static enum scx_test_status run(void *ctx) 26 + { 27 + struct select_cpu_vtime *skel = ctx; 28 + struct bpf_link *link; 29 + 30 + SCX_ASSERT(!skel->bss->consumed); 31 + 32 + link = bpf_map__attach_struct_ops(skel->maps.select_cpu_vtime_ops); 33 + SCX_FAIL_IF(!link, "Failed to attach scheduler"); 34 + 35 + sleep(1); 36 + 37 + SCX_ASSERT(skel->bss->consumed); 38 + 39 + bpf_link__destroy(link); 40 + 41 + return SCX_TEST_PASS; 42 + } 43 + 44 + static void cleanup(void *ctx) 45 + { 46 + struct select_cpu_vtime *skel = ctx; 47 + 48 + select_cpu_vtime__destroy(skel); 49 + } 50 + 51 + struct scx_test select_cpu_vtime = { 52 + .name = "select_cpu_vtime", 53 + .description = "Test doing direct vtime-dispatching from " 54 + "ops.select_cpu(), to a non-built-in DSQ", 55 + .setup = setup, 56 + .run = run, 57 + .cleanup = cleanup, 58 + }; 59 + REGISTER_SCX_TEST(&select_cpu_vtime)
+49
tools/testing/selftests/sched_ext/test_example.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 Tejun Heo <tj@kernel.org> 5 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 6 + */ 7 + #include <bpf/bpf.h> 8 + #include <scx/common.h> 9 + #include "scx_test.h" 10 + 11 + static bool setup_called = false; 12 + static bool run_called = false; 13 + static bool cleanup_called = false; 14 + 15 + static int context = 10; 16 + 17 + static enum scx_test_status setup(void **ctx) 18 + { 19 + setup_called = true; 20 + *ctx = &context; 21 + 22 + return SCX_TEST_PASS; 23 + } 24 + 25 + static enum scx_test_status run(void *ctx) 26 + { 27 + int *arg = ctx; 28 + 29 + SCX_ASSERT(setup_called); 30 + SCX_ASSERT(!run_called && !cleanup_called); 31 + SCX_EQ(*arg, context); 32 + 33 + run_called = true; 34 + return SCX_TEST_PASS; 35 + } 36 + 37 + static void cleanup (void *ctx) 38 + { 39 + SCX_BUG_ON(!run_called || cleanup_called, "Wrong callbacks invoked"); 40 + } 41 + 42 + struct scx_test example = { 43 + .name = "example", 44 + .description = "Validate the basic function of the test suite itself", 45 + .setup = setup, 46 + .run = run, 47 + .cleanup = cleanup, 48 + }; 49 + REGISTER_SCX_TEST(&example)
+71
tools/testing/selftests/sched_ext/util.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <dvernet@meta.com> 5 + */ 6 + #include <errno.h> 7 + #include <fcntl.h> 8 + #include <stdio.h> 9 + #include <stdlib.h> 10 + #include <string.h> 11 + #include <unistd.h> 12 + 13 + /* Returns read len on success, or -errno on failure. */ 14 + static ssize_t read_text(const char *path, char *buf, size_t max_len) 15 + { 16 + ssize_t len; 17 + int fd; 18 + 19 + fd = open(path, O_RDONLY); 20 + if (fd < 0) 21 + return -errno; 22 + 23 + len = read(fd, buf, max_len - 1); 24 + 25 + if (len >= 0) 26 + buf[len] = 0; 27 + 28 + close(fd); 29 + return len < 0 ? -errno : len; 30 + } 31 + 32 + /* Returns written len on success, or -errno on failure. */ 33 + static ssize_t write_text(const char *path, char *buf, ssize_t len) 34 + { 35 + int fd; 36 + ssize_t written; 37 + 38 + fd = open(path, O_WRONLY | O_APPEND); 39 + if (fd < 0) 40 + return -errno; 41 + 42 + written = write(fd, buf, len); 43 + close(fd); 44 + return written < 0 ? -errno : written; 45 + } 46 + 47 + long file_read_long(const char *path) 48 + { 49 + char buf[128]; 50 + 51 + 52 + if (read_text(path, buf, sizeof(buf)) <= 0) 53 + return -1; 54 + 55 + return atol(buf); 56 + } 57 + 58 + int file_write_long(const char *path, long val) 59 + { 60 + char buf[64]; 61 + int ret; 62 + 63 + ret = sprintf(buf, "%lu", val); 64 + if (ret < 0) 65 + return ret; 66 + 67 + if (write_text(path, buf, sizeof(buf)) <= 0) 68 + return -1; 69 + 70 + return 0; 71 + }
+13
tools/testing/selftests/sched_ext/util.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 4 + * Copyright (c) 2024 David Vernet <void@manifault.com> 5 + */ 6 + 7 + #ifndef __SCX_TEST_UTIL_H__ 8 + #define __SCX_TEST_UTIL_H__ 9 + 10 + long file_read_long(const char *path); 11 + int file_write_long(const char *path, long val); 12 + 13 + #endif // __SCX_TEST_H__