Merge tag 'sched_ext-for-6.15-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext fixes from Tejun Heo:

- Use kvzalloc() so that large exit_dump buffer allocations don't fail
spuriously when physically contiguous memory is fragmented (a minimal
sketch of the kvzalloc()/kvfree() pattern follows this list)

- Remove the cpu.weight / cpu.idle unimplemented warnings, which are more
annoying than helpful.

This makes SCX_OPS_HAS_CGROUP_WEIGHT unnecessary. Mark it for
deprecation.

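The kvzalloc() switch follows the usual kernel pattern for buffers that can
span many pages: kmalloc-family allocations require physically contiguous
pages and can fail under fragmentation, while kvzalloc() falls back to
vmalloc() when a contiguous allocation isn't available, and kvfree() frees
either kind. A minimal sketch of that pattern with illustrative names
(dump_buf is hypothetical, not the kernel's scx_exit_info):

#include <linux/slab.h>		/* kzalloc(), kfree() */
#include <linux/mm.h>		/* kvzalloc(), kvfree() */

/* Hypothetical example struct, standing in for scx_exit_info. */
struct dump_buf {
	char	*data;
	size_t	len;
};

static struct dump_buf *dump_buf_alloc(size_t len)
{
	struct dump_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);	/* small, kmalloc is fine */
	if (!buf)
		return NULL;

	/*
	 * Potentially large: kvzalloc() tries kmalloc first and falls
	 * back to vmalloc() if contiguous pages aren't available, so a
	 * multi-megabyte request doesn't fail just due to fragmentation.
	 */
	buf->data = kvzalloc(len, GFP_KERNEL);
	if (!buf->data) {
		kfree(buf);
		return NULL;
	}
	buf->len = len;
	return buf;
}

static void dump_buf_free(struct dump_buf *buf)
{
	if (!buf)
		return;
	kvfree(buf->data);	/* kvfree() handles kmalloc'd and vmalloc'd memory */
	kfree(buf);
}
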
* tag 'sched_ext-for-6.15-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
sched_ext: Mark SCX_OPS_HAS_CGROUP_WEIGHT for deprecation
sched_ext: Remove cpu.weight / cpu.idle unimplemented warnings
sched_ext: Use kvzalloc for large exit_dump allocation
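
On the deprecation itself: the flag bit stays defined so existing schedulers
still load, its effect becomes a no-op, and the core warns at enable time,
which is what the kernel/sched/ext.c diff below does. As a generic sketch of
that binary-compatibility pattern (the MY_OPS_* names are illustrative, not
the real SCX_OPS_* enum):

#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative flags in the style of scx_ops_flags; not the real enum. */
enum my_ops_flags {
	MY_OPS_KEEP_BUILTIN_IDLE	= 1LLU << 0,
	/* DEPRECATED: kept so existing binaries still load; now a no-op. */
	MY_OPS_HAS_CGROUP_WEIGHT	= 1LLU << 16,
};

static int my_validate_ops(u64 flags)
{
	/*
	 * Accept the deprecated bit rather than failing the load, but
	 * warn so scheduler authors drop it before the bit is removed.
	 */
	if (flags & MY_OPS_HAS_CGROUP_WEIGHT)
		pr_warn("MY_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n");

	return 0;
}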

+8 -44
+7 -43
kernel/sched/ext.c
···
	/*
	 * CPU cgroup support flags
	 */
-	SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16,	/* cpu.weight */
+	SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16,	/* DEPRECATED, will be removed on 6.18 */

	SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
			    SCX_OPS_ENQ_LAST |
···
DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
static bool scx_cgroup_enabled;
-static bool cgroup_warned_missing_weight;
-static bool cgroup_warned_missing_idle;
-
-static void scx_cgroup_warn_missing_weight(struct task_group *tg)
-{
-	if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
-	    cgroup_warned_missing_weight)
-		return;
-
-	if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
-		return;
-
-	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
-		scx_ops.name);
-	cgroup_warned_missing_weight = true;
-}
-
-static void scx_cgroup_warn_missing_idle(struct task_group *tg)
-{
-	if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
-		return;
-
-	if (!tg->idle)
-		return;
-
-	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
-		scx_ops.name);
-	cgroup_warned_missing_idle = true;
-}

int scx_tg_online(struct task_group *tg)
{
···
	WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));

	percpu_down_read(&scx_cgroup_rwsem);
-
-	scx_cgroup_warn_missing_weight(tg);

	if (scx_cgroup_enabled) {
		if (SCX_HAS_OP(cgroup_init)) {
···

void scx_group_set_idle(struct task_group *tg, bool idle)
{
-	percpu_down_read(&scx_cgroup_rwsem);
-	scx_cgroup_warn_missing_idle(tg);
-	percpu_up_read(&scx_cgroup_rwsem);
+	/* TODO: Implement ops->cgroup_set_idle() */
}

static void scx_cgroup_lock(void)
···

	percpu_rwsem_assert_held(&scx_cgroup_rwsem);

-	cgroup_warned_missing_weight = false;
-	cgroup_warned_missing_idle = false;
-
	/*
	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
	 * cgroups and init, all online cgroups are initialized.
···
	css_for_each_descendant_pre(css, &root_task_group.css) {
		struct task_group *tg = css_tg(css);
		struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
-
-		scx_cgroup_warn_missing_weight(tg);
-		scx_cgroup_warn_missing_idle(tg);

		if ((tg->scx_flags &
		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
···

static void free_exit_info(struct scx_exit_info *ei)
{
-	kfree(ei->dump);
+	kvfree(ei->dump);
	kfree(ei->msg);
	kfree(ei->bt);
	kfree(ei);
···

	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
-	ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
+	ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);

	if (!ei->bt || !ei->msg || !ei->dump) {
		free_exit_info(ei);
···
		scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
		return -EINVAL;
	}
+
+	if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT)
+		pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n");

	return 0;
}
+1 -1
tools/sched_ext/scx_flatcg.bpf.c
···
	.cgroup_move		= (void *)fcg_cgroup_move,
	.init			= (void *)fcg_init,
	.exit			= (void *)fcg_exit,
-	.flags			= SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,
+	.flags			= SCX_OPS_ENQ_EXITING,
	.name			= "flatcg");