Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'sched_ext-for-6.12-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext fixes from Tejun Heo:

- More issues reported in the enable/disable paths on large machines
with many tasks due to scx_tasks_lock being held too long. Break up
the task iterations

- Remove ops.select_cpu() dependency in bypass mode so that a
misbehaving implementation can't live-lock the machine by pushing all
tasks to a few CPUs in bypass mode

- Other misc fixes

* tag 'sched_ext-for-6.12-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
sched_ext: Remove unnecessary cpu_relax()
sched_ext: Don't hold scx_tasks_lock for too long
sched_ext: Move scx_tasks_lock handling into scx_task_iter helpers
sched_ext: bypass mode shouldn't depend on ops.select_cpu()
sched_ext: Move scx_buildin_idle_enabled check to scx_bpf_select_cpu_dfl()
sched_ext: Start schedulers with consistent p->scx.slice values
Revert "sched_ext: Use shorter slice while bypassing"
sched_ext: use correct function name in pick_task_scx() warning message
selftests: sched_ext: Add sched_ext as proper selftest target

+140 -130
+102 -86
kernel/sched/ext.c
··· 9 9 #define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void))) 10 10 11 11 enum scx_consts { 12 - SCX_SLICE_BYPASS = SCX_SLICE_DFL / 4, 13 12 SCX_DSP_DFL_MAX_BATCH = 32, 14 13 SCX_DSP_MAX_LOOPS = 32, 15 14 SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ, ··· 18 19 SCX_EXIT_DUMP_DFL_LEN = 32768, 19 20 20 21 SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE, 22 + 23 + /* 24 + * Iterating all tasks may take a while. Periodically drop 25 + * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls. 26 + */ 27 + SCX_OPS_TASK_ITER_BATCH = 32, 21 28 }; 22 29 23 30 enum scx_exit_kind { ··· 1279 1274 struct task_struct *locked; 1280 1275 struct rq *rq; 1281 1276 struct rq_flags rf; 1277 + u32 cnt; 1282 1278 }; 1283 1279 1284 1280 /** 1285 - * scx_task_iter_init - Initialize a task iterator 1281 + * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration 1286 1282 * @iter: iterator to init 1287 1283 * 1288 - * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized, 1289 - * @iter must eventually be exited with scx_task_iter_exit(). 1284 + * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter 1285 + * must eventually be stopped with scx_task_iter_stop(). 1290 1286 * 1291 - * scx_tasks_lock may be released between this and the first next() call or 1292 - * between any two next() calls. If scx_tasks_lock is released between two 1293 - * next() calls, the caller is responsible for ensuring that the task being 1294 - * iterated remains accessible either through RCU read lock or obtaining a 1295 - * reference count. 1287 + * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock() 1288 + * between this and the first next() call or between any two next() calls. If 1289 + * the locks are released between two next() calls, the caller is responsible 1290 + * for ensuring that the task being iterated remains accessible either through 1291 + * RCU read lock or obtaining a reference count. 
1296 1292 * 1297 1293 * All tasks which existed when the iteration started are guaranteed to be 1298 1294 * visited as long as they still exist. 1299 1295 */ 1300 - static void scx_task_iter_init(struct scx_task_iter *iter) 1296 + static void scx_task_iter_start(struct scx_task_iter *iter) 1301 1297 { 1302 - lockdep_assert_held(&scx_tasks_lock); 1303 - 1304 1298 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS & 1305 1299 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1)); 1300 + 1301 + spin_lock_irq(&scx_tasks_lock); 1306 1302 1307 1303 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR }; 1308 1304 list_add(&iter->cursor.tasks_node, &scx_tasks); 1309 1305 iter->locked = NULL; 1306 + iter->cnt = 0; 1310 1307 } 1311 1308 1312 - /** 1313 - * scx_task_iter_rq_unlock - Unlock rq locked by a task iterator 1314 - * @iter: iterator to unlock rq for 1315 - * 1316 - * If @iter is in the middle of a locked iteration, it may be locking the rq of 1317 - * the task currently being visited. Unlock the rq if so. This function can be 1318 - * safely called anytime during an iteration. 1319 - * 1320 - * Returns %true if the rq @iter was locking is unlocked. %false if @iter was 1321 - * not locking an rq. 1322 - */ 1323 - static bool scx_task_iter_rq_unlock(struct scx_task_iter *iter) 1309 + static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter) 1324 1310 { 1325 1311 if (iter->locked) { 1326 1312 task_rq_unlock(iter->rq, iter->locked, &iter->rf); 1327 1313 iter->locked = NULL; 1328 - return true; 1329 - } else { 1330 - return false; 1331 1314 } 1332 1315 } 1333 1316 1334 1317 /** 1335 - * scx_task_iter_exit - Exit a task iterator 1318 + * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator 1319 + * @iter: iterator to unlock 1320 + * 1321 + * If @iter is in the middle of a locked iteration, it may be locking the rq of 1322 + * the task currently being visited in addition to scx_tasks_lock. Unlock both. 
1323 + * This function can be safely called anytime during an iteration. 1324 + */ 1325 + static void scx_task_iter_unlock(struct scx_task_iter *iter) 1326 + { 1327 + __scx_task_iter_rq_unlock(iter); 1328 + spin_unlock_irq(&scx_tasks_lock); 1329 + } 1330 + 1331 + /** 1332 + * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock() 1333 + * @iter: iterator to re-lock 1334 + * 1335 + * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it 1336 + * doesn't re-lock the rq lock. Must be called before other iterator operations. 1337 + */ 1338 + static void scx_task_iter_relock(struct scx_task_iter *iter) 1339 + { 1340 + spin_lock_irq(&scx_tasks_lock); 1341 + } 1342 + 1343 + /** 1344 + * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock 1336 1345 * @iter: iterator to exit 1337 1346 * 1338 - * Exit a previously initialized @iter. Must be called with scx_tasks_lock held. 1339 - * If the iterator holds a task's rq lock, that rq lock is released. See 1340 - * scx_task_iter_init() for details. 1347 + * Exit a previously initialized @iter. Must be called with scx_tasks_lock held 1348 + * which is released on return. If the iterator holds a task's rq lock, that rq 1349 + * lock is also released. See scx_task_iter_start() for details. 1341 1350 */ 1342 - static void scx_task_iter_exit(struct scx_task_iter *iter) 1351 + static void scx_task_iter_stop(struct scx_task_iter *iter) 1343 1352 { 1344 - lockdep_assert_held(&scx_tasks_lock); 1345 - 1346 - scx_task_iter_rq_unlock(iter); 1347 1353 list_del_init(&iter->cursor.tasks_node); 1354 + scx_task_iter_unlock(iter); 1348 1355 } 1349 1356 1350 1357 /** 1351 1358 * scx_task_iter_next - Next task 1352 1359 * @iter: iterator to walk 1353 1360 * 1354 - * Visit the next task. See scx_task_iter_init() for details. 1361 + * Visit the next task. See scx_task_iter_start() for details. 
Locks are dropped 1362 + * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing 1363 + * stalls by holding scx_tasks_lock for too long. 1355 1364 */ 1356 1365 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter) 1357 1366 { 1358 1367 struct list_head *cursor = &iter->cursor.tasks_node; 1359 1368 struct sched_ext_entity *pos; 1360 1369 1361 - lockdep_assert_held(&scx_tasks_lock); 1370 + if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) { 1371 + scx_task_iter_unlock(iter); 1372 + cond_resched(); 1373 + scx_task_iter_relock(iter); 1374 + } 1362 1375 1363 1376 list_for_each_entry(pos, cursor, tasks_node) { 1364 1377 if (&pos->tasks_node == &scx_tasks) ··· 1397 1374 * @include_dead: Whether we should include dead tasks in the iteration 1398 1375 * 1399 1376 * Visit the non-idle task with its rq lock held. Allows callers to specify 1400 - * whether they would like to filter out dead tasks. See scx_task_iter_init() 1377 + * whether they would like to filter out dead tasks. See scx_task_iter_start() 1401 1378 * for details. 1402 1379 */ 1403 1380 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter) 1404 1381 { 1405 1382 struct task_struct *p; 1406 1383 1407 - scx_task_iter_rq_unlock(iter); 1384 + __scx_task_iter_rq_unlock(iter); 1408 1385 1409 1386 while ((p = scx_task_iter_next(iter))) { 1410 1387 /* ··· 1972 1949 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, 1973 1950 int sticky_cpu) 1974 1951 { 1975 - bool bypassing = scx_rq_bypassing(rq); 1976 1952 struct task_struct **ddsp_taskp; 1977 1953 unsigned long qseq; 1978 1954 ··· 1989 1967 if (!scx_rq_online(rq)) 1990 1968 goto local; 1991 1969 1992 - if (bypassing) 1970 + if (scx_rq_bypassing(rq)) 1993 1971 goto global; 1994 1972 1995 1973 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) ··· 2044 2022 2045 2023 global: 2046 2024 touch_core_sched(rq, p); /* see the comment in local: */ 2047 - p->scx.slice = bypassing ? 
SCX_SLICE_BYPASS : SCX_SLICE_DFL; 2025 + p->scx.slice = SCX_SLICE_DFL; 2048 2026 dispatch_enqueue(find_global_dsq(p), p, enq_flags); 2049 2027 } 2050 2028 ··· 2980 2958 2981 2959 if (unlikely(!p->scx.slice)) { 2982 2960 if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) { 2983 - printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in pick_next_task_scx()\n", 2984 - p->comm, p->pid); 2961 + printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n", 2962 + p->comm, p->pid, __func__); 2985 2963 scx_warned_zero_slice = true; 2986 2964 } 2987 2965 p->scx.slice = SCX_SLICE_DFL; ··· 3086 3064 3087 3065 *found = false; 3088 3066 3089 - if (!static_branch_likely(&scx_builtin_idle_enabled)) { 3090 - scx_ops_error("built-in idle tracking is disabled"); 3091 - return prev_cpu; 3092 - } 3093 - 3094 3067 /* 3095 3068 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is 3096 3069 * under utilized, wake up @p to the local DSQ of the waker. Checking ··· 3150 3133 if (unlikely(wake_flags & WF_EXEC)) 3151 3134 return prev_cpu; 3152 3135 3153 - if (SCX_HAS_OP(select_cpu)) { 3136 + if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) { 3154 3137 s32 cpu; 3155 3138 struct task_struct **ddsp_taskp; 3156 3139 ··· 3215 3198 { 3216 3199 int cpu = cpu_of(rq); 3217 3200 3218 - if (SCX_HAS_OP(update_idle)) { 3201 + if (SCX_HAS_OP(update_idle) && !scx_rq_bypassing(rq)) { 3219 3202 SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle); 3220 3203 if (!static_branch_unlikely(&scx_builtin_idle_enabled)) 3221 3204 return; ··· 4278 4261 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue 4279 4262 * to force global FIFO scheduling. 4280 4263 * 4281 - * a. ops.enqueue() is ignored and tasks are queued in simple global FIFO order. 4282 - * %SCX_OPS_ENQ_LAST is also ignored. 4264 + * - ops.select_cpu() is ignored and the default select_cpu() is used. 4283 4265 * 4284 - * b. ops.dispatch() is ignored. 
4266 + * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order. 4267 + * %SCX_OPS_ENQ_LAST is also ignored. 4285 4268 * 4286 - * c. balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice 4287 - * can't be trusted. Whenever a tick triggers, the running task is rotated to 4288 - * the tail of the queue with core_sched_at touched. 4269 + * - ops.dispatch() is ignored. 4289 4270 * 4290 - * d. pick_next_task() suppresses zero slice warning. 4271 + * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice 4272 + * can't be trusted. Whenever a tick triggers, the running task is rotated to 4273 + * the tail of the queue with core_sched_at touched. 4291 4274 * 4292 - * e. scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM 4293 - * operations. 4275 + * - pick_next_task() suppresses zero slice warning. 4294 4276 * 4295 - * f. scx_prio_less() reverts to the default core_sched_at order. 4277 + * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM 4278 + * operations. 4279 + * 4280 + * - scx_prio_less() reverts to the default core_sched_at order. 
4296 4281 */ 4297 4282 static void scx_ops_bypass(bool bypass) 4298 4283 { ··· 4364 4345 4365 4346 rq_unlock_irqrestore(rq, &rf); 4366 4347 4367 - /* kick to restore ticks */ 4348 + /* resched to restore ticks and idle state */ 4368 4349 resched_cpu(cpu); 4369 4350 } 4370 4351 } ··· 4486 4467 4487 4468 scx_ops_init_task_enabled = false; 4488 4469 4489 - spin_lock_irq(&scx_tasks_lock); 4490 - scx_task_iter_init(&sti); 4470 + scx_task_iter_start(&sti); 4491 4471 while ((p = scx_task_iter_next_locked(&sti))) { 4492 4472 const struct sched_class *old_class = p->sched_class; 4493 4473 struct sched_enq_and_set_ctx ctx; 4494 4474 4495 4475 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 4496 4476 4497 - p->scx.slice = min_t(u64, p->scx.slice, SCX_SLICE_DFL); 4498 4477 __setscheduler_prio(p, p->prio); 4499 4478 check_class_changing(task_rq(p), p, old_class); 4500 4479 ··· 4501 4484 check_class_changed(task_rq(p), p, old_class, p->prio); 4502 4485 scx_ops_exit_task(p); 4503 4486 } 4504 - scx_task_iter_exit(&sti); 4505 - spin_unlock_irq(&scx_tasks_lock); 4487 + scx_task_iter_stop(&sti); 4506 4488 percpu_up_write(&scx_fork_rwsem); 4507 4489 4508 4490 /* no task is on scx, turn off all the switches and flush in-progress calls */ ··· 5152 5136 if (ret) 5153 5137 goto err_disable_unlock_all; 5154 5138 5155 - spin_lock_irq(&scx_tasks_lock); 5156 - scx_task_iter_init(&sti); 5139 + scx_task_iter_start(&sti); 5157 5140 while ((p = scx_task_iter_next_locked(&sti))) { 5158 5141 /* 5159 5142 * @p may already be dead, have lost all its usages counts and ··· 5162 5147 if (!tryget_task_struct(p)) 5163 5148 continue; 5164 5149 5165 - scx_task_iter_rq_unlock(&sti); 5166 - spin_unlock_irq(&scx_tasks_lock); 5150 + scx_task_iter_unlock(&sti); 5167 5151 5168 5152 ret = scx_ops_init_task(p, task_group(p), false); 5169 5153 if (ret) { 5170 5154 put_task_struct(p); 5171 - spin_lock_irq(&scx_tasks_lock); 5172 - scx_task_iter_exit(&sti); 5173 - spin_unlock_irq(&scx_tasks_lock); 5155 + 
scx_task_iter_relock(&sti); 5156 + scx_task_iter_stop(&sti); 5174 5157 scx_ops_error("ops.init_task() failed (%d) for %s[%d]", 5175 5158 ret, p->comm, p->pid); 5176 5159 goto err_disable_unlock_all; ··· 5177 5164 scx_set_task_state(p, SCX_TASK_READY); 5178 5165 5179 5166 put_task_struct(p); 5180 - spin_lock_irq(&scx_tasks_lock); 5167 + scx_task_iter_relock(&sti); 5181 5168 } 5182 - scx_task_iter_exit(&sti); 5183 - spin_unlock_irq(&scx_tasks_lock); 5169 + scx_task_iter_stop(&sti); 5184 5170 scx_cgroup_unlock(); 5185 5171 percpu_up_write(&scx_fork_rwsem); 5186 5172 ··· 5196 5184 * scx_tasks_lock. 5197 5185 */ 5198 5186 percpu_down_write(&scx_fork_rwsem); 5199 - spin_lock_irq(&scx_tasks_lock); 5200 - scx_task_iter_init(&sti); 5187 + scx_task_iter_start(&sti); 5201 5188 while ((p = scx_task_iter_next_locked(&sti))) { 5202 5189 const struct sched_class *old_class = p->sched_class; 5203 5190 struct sched_enq_and_set_ctx ctx; 5204 5191 5205 5192 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 5206 5193 5194 + p->scx.slice = SCX_SLICE_DFL; 5207 5195 __setscheduler_prio(p, p->prio); 5208 5196 check_class_changing(task_rq(p), p, old_class); 5209 5197 ··· 5211 5199 5212 5200 check_class_changed(task_rq(p), p, old_class, p->prio); 5213 5201 } 5214 - scx_task_iter_exit(&sti); 5215 - spin_unlock_irq(&scx_tasks_lock); 5202 + scx_task_iter_stop(&sti); 5216 5203 percpu_up_write(&scx_fork_rwsem); 5217 5204 5218 5205 scx_ops_bypass(false); ··· 5883 5872 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, 5884 5873 u64 wake_flags, bool *is_idle) 5885 5874 { 5886 - if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) { 5887 - *is_idle = false; 5888 - return prev_cpu; 5875 + if (!static_branch_likely(&scx_builtin_idle_enabled)) { 5876 + scx_ops_error("built-in idle tracking is disabled"); 5877 + goto prev_cpu; 5889 5878 } 5879 + 5880 + if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) 5881 + goto prev_cpu; 5882 + 5890 5883 #ifdef CONFIG_SMP 5891 5884 return 
scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle); 5892 - #else 5885 + #endif 5886 + 5887 + prev_cpu: 5893 5888 *is_idle = false; 5894 5889 return prev_cpu; 5895 - #endif 5896 5890 } 5897 5891 5898 5892 __bpf_kfunc_end_defs();
+5 -4
tools/testing/selftests/Makefile
··· 88 88 TARGETS += rseq 89 89 TARGETS += rtc 90 90 TARGETS += rust 91 + TARGETS += sched_ext 91 92 TARGETS += seccomp 92 93 TARGETS += sgx 93 94 TARGETS += sigaltstack ··· 130 129 endif 131 130 endif 132 131 133 - # User can optionally provide a TARGETS skiplist. By default we skip 134 - # BPF since it has cutting edge build time dependencies which require 135 - # more effort to install. 136 - SKIP_TARGETS ?= bpf 132 + # User can optionally provide a TARGETS skiplist. By default we skip 133 + # targets using BPF since it has cutting edge build time dependencies 134 + # which require more effort to install. 135 + SKIP_TARGETS ?= bpf sched_ext 137 136 ifneq ($(SKIP_TARGETS),) 138 137 TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS)) 139 138 override TARGETS := $(TMP)
+33 -40
tools/testing/selftests/sched_ext/Makefile
··· 3 3 include ../../../build/Build.include 4 4 include ../../../scripts/Makefile.arch 5 5 include ../../../scripts/Makefile.include 6 + 7 + TEST_GEN_PROGS := runner 8 + 9 + # override lib.mk's default rules 10 + OVERRIDE_TARGETS := 1 6 11 include ../lib.mk 7 - 8 - ifneq ($(LLVM),) 9 - ifneq ($(filter %/,$(LLVM)),) 10 - LLVM_PREFIX := $(LLVM) 11 - else ifneq ($(filter -%,$(LLVM)),) 12 - LLVM_SUFFIX := $(LLVM) 13 - endif 14 - 15 - CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as 16 - else 17 - CC := gcc 18 - endif # LLVM 19 - 20 - ifneq ($(CROSS_COMPILE),) 21 - $(error CROSS_COMPILE not supported for scx selftests) 22 - endif # CROSS_COMPILE 23 12 24 13 CURDIR := $(abspath .) 25 14 REPOROOT := $(abspath ../../../..) ··· 23 34 SCXTOOLSDIR := $(TOOLSDIR)/sched_ext 24 35 SCXTOOLSINCDIR := $(TOOLSDIR)/sched_ext/include 25 36 26 - OUTPUT_DIR := $(CURDIR)/build 37 + OUTPUT_DIR := $(OUTPUT)/build 27 38 OBJ_DIR := $(OUTPUT_DIR)/obj 28 39 INCLUDE_DIR := $(OUTPUT_DIR)/include 29 40 BPFOBJ_DIR := $(OBJ_DIR)/libbpf 30 41 SCXOBJ_DIR := $(OBJ_DIR)/sched_ext 31 42 BPFOBJ := $(BPFOBJ_DIR)/libbpf.a 32 43 LIBBPF_OUTPUT := $(OBJ_DIR)/libbpf/libbpf.a 33 - DEFAULT_BPFTOOL := $(OUTPUT_DIR)/sbin/bpftool 34 - HOST_BUILD_DIR := $(OBJ_DIR) 35 - HOST_OUTPUT_DIR := $(OUTPUT_DIR) 36 44 37 - VMLINUX_BTF_PATHS ?= ../../../../vmlinux \ 45 + DEFAULT_BPFTOOL := $(OUTPUT_DIR)/host/sbin/bpftool 46 + HOST_OBJ_DIR := $(OBJ_DIR)/host/bpftool 47 + HOST_LIBBPF_OUTPUT := $(OBJ_DIR)/host/libbpf/ 48 + HOST_LIBBPF_DESTDIR := $(OUTPUT_DIR)/host/ 49 + HOST_DESTDIR := $(OUTPUT_DIR)/host/ 50 + 51 + VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ 52 + $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ 53 + ../../../../vmlinux \ 38 54 /sys/kernel/btf/vmlinux \ 39 55 /boot/vmlinux-$(shell uname -r) 40 56 VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) ··· 74 80 # Use '-idirafter': Don't interfere with include mechanics except where the 75 81 # build would have failed 
anyways. 76 82 define get_sys_includes 77 - $(shell $(1) -v -E - </dev/null 2>&1 \ 83 + $(shell $(1) $(2) -v -E - </dev/null 2>&1 \ 78 84 | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ 79 - $(shell $(1) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}') 85 + $(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}') 80 86 endef 87 + 88 + ifneq ($(CROSS_COMPILE),) 89 + CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%)) 90 + endif 91 + 92 + CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH)) 81 93 82 94 BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ 83 95 $(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) \ 84 96 -I$(CURDIR)/include -I$(CURDIR)/include/bpf-compat \ 85 97 -I$(INCLUDE_DIR) -I$(APIDIR) -I$(SCXTOOLSINCDIR) \ 86 98 -I$(REPOROOT)/include \ 87 - $(call get_sys_includes,$(CLANG)) \ 99 + $(CLANG_SYS_INCLUDES) \ 88 100 -Wall -Wno-compare-distinct-pointer-types \ 89 101 -Wno-incompatible-function-pointer-types \ 90 102 -O2 -mcpu=v3 ··· 98 98 # sort removes libbpf duplicates when not cross-building 99 99 MAKE_DIRS := $(sort $(OBJ_DIR)/libbpf $(OBJ_DIR)/libbpf \ 100 100 $(OBJ_DIR)/bpftool $(OBJ_DIR)/resolve_btfids \ 101 - $(INCLUDE_DIR) $(SCXOBJ_DIR)) 101 + $(HOST_OBJ_DIR) $(INCLUDE_DIR) $(SCXOBJ_DIR)) 102 102 103 103 $(MAKE_DIRS): 104 104 $(call msg,MKDIR,,$@) ··· 108 108 $(APIDIR)/linux/bpf.h \ 109 109 | $(OBJ_DIR)/libbpf 110 110 $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \ 111 + ARCH=$(ARCH) CC="$(CC)" CROSS_COMPILE=$(CROSS_COMPILE) \ 111 112 EXTRA_CFLAGS='-g -O0 -fPIC' \ 112 113 DESTDIR=$(OUTPUT_DIR) prefix= all install_headers 113 114 114 115 $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ 115 - $(LIBBPF_OUTPUT) | $(OBJ_DIR)/bpftool 116 + $(LIBBPF_OUTPUT) | $(HOST_OBJ_DIR) 116 117 
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ 117 118 ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \ 118 119 EXTRA_CFLAGS='-g -O0' \ 119 - OUTPUT=$(OBJ_DIR)/bpftool/ \ 120 - LIBBPF_OUTPUT=$(OBJ_DIR)/libbpf/ \ 121 - LIBBPF_DESTDIR=$(OUTPUT_DIR)/ \ 122 - prefix= DESTDIR=$(OUTPUT_DIR)/ install-bin 120 + OUTPUT=$(HOST_OBJ_DIR)/ \ 121 + LIBBPF_OUTPUT=$(HOST_LIBBPF_OUTPUT) \ 122 + LIBBPF_DESTDIR=$(HOST_LIBBPF_DESTDIR) \ 123 + prefix= DESTDIR=$(HOST_DESTDIR) install-bin 123 124 124 125 $(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR) 125 126 ifeq ($(VMLINUX_H),) ··· 151 150 152 151 override define CLEAN 153 152 rm -rf $(OUTPUT_DIR) 154 - rm -f *.o *.bpf.o *.bpf.skel.h *.bpf.subskel.h 155 153 rm -f $(TEST_GEN_PROGS) 156 - rm -f runner 157 154 endef 158 155 159 156 # Every testcase takes all of the BPF progs are dependencies by default. This ··· 195 196 # function doesn't support using implicit rules otherwise. 196 197 $(testcase-targets): $(SCXOBJ_DIR)/%.o: %.c $(SCXOBJ_DIR)/runner.o $(all_test_bpfprogs) | $(SCXOBJ_DIR) 197 198 $(eval test=$(patsubst %.o,%.c,$(notdir $@))) 198 - $(CC) $(CFLAGS) -c $< -o $@ $(SCXOBJ_DIR)/runner.o 199 + $(CC) $(CFLAGS) -c $< -o $@ 199 200 200 201 $(SCXOBJ_DIR)/util.o: util.c | $(SCXOBJ_DIR) 201 202 $(CC) $(CFLAGS) -c $< -o $@ 202 203 203 - runner: $(SCXOBJ_DIR)/runner.o $(SCXOBJ_DIR)/util.o $(BPFOBJ) $(testcase-targets) 204 + $(OUTPUT)/runner: $(SCXOBJ_DIR)/runner.o $(SCXOBJ_DIR)/util.o $(BPFOBJ) $(testcase-targets) 204 205 @echo "$(testcase-targets)" 205 206 $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) 206 - 207 - TEST_GEN_PROGS := runner 208 - 209 - all: runner 210 - 211 - .PHONY: all clean help 212 207 213 208 .DEFAULT_GOAL := all 214 209