Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: add _impl suffix for bpf_task_work_schedule* kfuncs

Rename:
bpf_task_work_schedule_resume()->bpf_task_work_schedule_resume_impl()
bpf_task_work_schedule_signal()->bpf_task_work_schedule_signal_impl()

This aligns task work scheduling kfuncs with the established naming
scheme for kfuncs with the bpf_prog_aux argument provided by the
verifier implicitly. This convention will be taken advantage of with the
upcoming KF_IMPLICIT_ARGS feature to preserve backwards compatibility to
BPF programs.

Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Link: https://lore.kernel.org/r/20251104-implv2-v3-1-4772b9ae0e06@meta.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Ihor Solodrai <ihor.solodrai@linux.dev>

authored by

Mykyta Yatsenko and committed by
Alexei Starovoitov
ea0714d6 156c75f5

+29 -25
+14 -10
kernel/bpf/helpers.c
··· 4169 4169 } 4170 4170 4171 4171 /** 4172 - * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode 4172 + * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL 4173 + * mode 4173 4174 * @task: Task struct for which callback should be scheduled 4174 4175 * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping 4175 4176 * @map__map: bpf_map that embeds struct bpf_task_work in the values ··· 4179 4178 * 4180 4179 * Return: 0 if task work has been scheduled successfully, negative error code otherwise 4181 4180 */ 4182 - __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw, 4183 - void *map__map, bpf_task_work_callback_t callback, 4184 - void *aux__prog) 4181 + __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task, 4182 + struct bpf_task_work *tw, void *map__map, 4183 + bpf_task_work_callback_t callback, 4184 + void *aux__prog) 4185 4185 { 4186 4186 return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL); 4187 4187 } 4188 4188 4189 4189 /** 4190 - * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME mode 4190 + * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME 4191 + * mode 4191 4192 * @task: Task struct for which callback should be scheduled 4192 4193 * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping 4193 4194 * @map__map: bpf_map that embeds struct bpf_task_work in the values ··· 4198 4195 * 4199 4196 * Return: 0 if task work has been scheduled successfully, negative error code otherwise 4200 4197 */ 4201 - __bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw, 4202 - void *map__map, bpf_task_work_callback_t callback, 4203 - void *aux__prog) 4198 + __bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task, 4199 + struct bpf_task_work *tw, void *map__map, 4200 + bpf_task_work_callback_t callback, 4201 + void *aux__prog) 4204 4202 { 4205 4203 return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME); 4206 4204 } ··· 4381 4377 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU) 4382 4378 #endif 4383 4379 BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS) 4384 - BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_TRUSTED_ARGS) 4385 - BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_TRUSTED_ARGS) 4380 + BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl, KF_TRUSTED_ARGS) 4381 + BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl, KF_TRUSTED_ARGS) 4386 4382 BTF_KFUNCS_END(common_btf_ids) 4387 4383 4388 4384 static const struct btf_kfunc_id_set common_kfunc_set = {
+6 -6
kernel/bpf/verifier.c
··· 12259 12259 KF_bpf_res_spin_lock_irqsave, 12260 12260 KF_bpf_res_spin_unlock_irqrestore, 12261 12261 KF___bpf_trap, 12262 - KF_bpf_task_work_schedule_signal, 12263 - KF_bpf_task_work_schedule_resume, 12262 + KF_bpf_task_work_schedule_signal_impl, 12263 + KF_bpf_task_work_schedule_resume_impl, 12264 12264 }; 12265 12265 12266 12266 BTF_ID_LIST(special_kfunc_list) ··· 12331 12331 BTF_ID(func, bpf_res_spin_lock_irqsave) 12332 12332 BTF_ID(func, bpf_res_spin_unlock_irqrestore) 12333 12333 BTF_ID(func, __bpf_trap) 12334 - BTF_ID(func, bpf_task_work_schedule_signal) 12335 - BTF_ID(func, bpf_task_work_schedule_resume) 12334 + BTF_ID(func, bpf_task_work_schedule_signal_impl) 12335 + BTF_ID(func, bpf_task_work_schedule_resume_impl) 12336 12336 12337 12337 static bool is_task_work_add_kfunc(u32 func_id) 12338 12338 { 12339 - return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] || 12340 - func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume]; 12339 + return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] || 12340 + func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl]; 12341 12341 } 12342 12342 12343 12343 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
+3 -3
tools/testing/selftests/bpf/progs/task_work.c
··· 66 66 if (!work) 67 67 return 0; 68 68 69 - bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL); 69 + bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL); 70 70 return 0; 71 71 } 72 72 ··· 80 80 work = bpf_map_lookup_elem(&arrmap, &key); 81 81 if (!work) 82 82 return 0; 83 - bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work, NULL); 83 + bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL); 84 84 return 0; 85 85 } 86 86 ··· 102 102 work = bpf_map_lookup_elem(&lrumap, &key); 103 103 if (!work || work->data[0]) 104 104 return 0; 105 - bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work, NULL); 105 + bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL); 106 106 return 0; 107 107 }
+4 -4
tools/testing/selftests/bpf/progs/task_work_fail.c
··· 53 53 work = bpf_map_lookup_elem(&arrmap, &key); 54 54 if (!work) 55 55 return 0; 56 - bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL); 56 + bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL); 57 57 return 0; 58 58 } 59 59 ··· 65 65 struct bpf_task_work tw; 66 66 67 67 task = bpf_get_current_task_btf(); 68 - bpf_task_work_schedule_resume(task, &tw, &hmap, process_work, NULL); 68 + bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL); 69 69 return 0; 70 70 } 71 71 ··· 76 76 struct task_struct *task; 77 77 78 78 task = bpf_get_current_task_btf(); 79 - bpf_task_work_schedule_resume(task, NULL, &hmap, process_work, NULL); 79 + bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL); 80 80 return 0; 81 81 } 82 82 ··· 91 91 work = bpf_map_lookup_elem(&arrmap, &key); 92 92 if (!work) 93 93 return 0; 94 - bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work, NULL); 94 + bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL); 95 95 return 0; 96 96 }
+2 -2
tools/testing/selftests/bpf/progs/task_work_stress.c
··· 51 51 if (!work) 52 52 return 0; 53 53 } 54 - err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap, 55 - process_work, NULL); 54 + err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap, 55 + process_work, NULL); 56 56 if (err) 57 57 __sync_fetch_and_add(&schedule_error, 1); 58 58 else