Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: get_call_summary() utility function

Refactor mark_fastcall_pattern_for_call() to extract a utility
function get_call_summary(). For a helper or kfunc call this function
fills the following information: {num_params, is_void, fastcall}.

This function will be used in the next patch in order to get the number
of parameters of a helper or kfunc call.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250304195024.2478889-3-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Eduard Zingerman and committed by
Alexei Starovoitov
22f83974 80ca3f1d

+58 -65
+58 -65
kernel/bpf/verifier.c
··· 17019 17019 /* Bitmask with 1s for all caller saved registers */ 17020 17020 #define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1) 17021 17021 17022 - /* Return a bitmask specifying which caller saved registers are 17023 - * clobbered by a call to a helper *as if* this helper follows 17024 - * bpf_fastcall contract: 17025 - * - includes R0 if function is non-void; 17026 - * - includes R1-R5 if corresponding parameter has is described 17027 - * in the function prototype. 17028 - */ 17029 - static u32 helper_fastcall_clobber_mask(const struct bpf_func_proto *fn) 17030 - { 17031 - u32 mask; 17032 - int i; 17033 - 17034 - mask = 0; 17035 - if (fn->ret_type != RET_VOID) 17036 - mask |= BIT(BPF_REG_0); 17037 - for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i) 17038 - if (fn->arg_type[i] != ARG_DONTCARE) 17039 - mask |= BIT(BPF_REG_1 + i); 17040 - return mask; 17041 - } 17042 - 17043 17022 /* True if do_misc_fixups() replaces calls to helper number 'imm', 17044 17023 * replacement patch is presumed to follow bpf_fastcall contract 17045 17024 * (see mark_fastcall_pattern_for_call() below). 
··· 17035 17056 } 17036 17057 } 17037 17058 17038 - /* Same as helper_fastcall_clobber_mask() but for kfuncs, see comment above */ 17039 - static u32 kfunc_fastcall_clobber_mask(struct bpf_kfunc_call_arg_meta *meta) 17040 - { 17041 - u32 vlen, i, mask; 17059 + struct call_summary { 17060 + u8 num_params; 17061 + bool is_void; 17062 + bool fastcall; 17063 + }; 17042 17064 17043 - vlen = btf_type_vlen(meta->func_proto); 17044 - mask = 0; 17045 - if (!btf_type_is_void(btf_type_by_id(meta->btf, meta->func_proto->type))) 17046 - mask |= BIT(BPF_REG_0); 17047 - for (i = 0; i < vlen; ++i) 17048 - mask |= BIT(BPF_REG_1 + i); 17049 - return mask; 17050 - } 17051 - 17052 - /* Same as verifier_inlines_helper_call() but for kfuncs, see comment above */ 17053 - static bool is_fastcall_kfunc_call(struct bpf_kfunc_call_arg_meta *meta) 17065 + /* If @call is a kfunc or helper call, fills @cs and returns true, 17066 + * otherwise returns false. 17067 + */ 17068 + static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call, 17069 + struct call_summary *cs) 17054 17070 { 17055 - return meta->kfunc_flags & KF_FASTCALL; 17071 + struct bpf_kfunc_call_arg_meta meta; 17072 + const struct bpf_func_proto *fn; 17073 + int i; 17074 + 17075 + if (bpf_helper_call(call)) { 17076 + 17077 + if (get_helper_proto(env, call->imm, &fn) < 0) 17078 + /* error would be reported later */ 17079 + return false; 17080 + cs->fastcall = fn->allow_fastcall && 17081 + (verifier_inlines_helper_call(env, call->imm) || 17082 + bpf_jit_inlines_helper_call(call->imm)); 17083 + cs->is_void = fn->ret_type == RET_VOID; 17084 + cs->num_params = 0; 17085 + for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i) { 17086 + if (fn->arg_type[i] == ARG_DONTCARE) 17087 + break; 17088 + cs->num_params++; 17089 + } 17090 + return true; 17091 + } 17092 + 17093 + if (bpf_pseudo_kfunc_call(call)) { 17094 + int err; 17095 + 17096 + err = fetch_kfunc_meta(env, call, &meta, NULL); 17097 + if (err < 0) 17098 + /* error would 
be reported later */ 17099 + return false; 17100 + cs->num_params = btf_type_vlen(meta.func_proto); 17101 + cs->fastcall = meta.kfunc_flags & KF_FASTCALL; 17102 + cs->is_void = btf_type_is_void(btf_type_by_id(meta.btf, meta.func_proto->type)); 17103 + return true; 17104 + } 17105 + 17106 + return false; 17056 17107 } 17057 17108 17058 17109 /* LLVM define a bpf_fastcall function attribute. ··· 17165 17156 { 17166 17157 struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx; 17167 17158 struct bpf_insn *call = &env->prog->insnsi[insn_idx]; 17168 - const struct bpf_func_proto *fn; 17169 - u32 clobbered_regs_mask = ALL_CALLER_SAVED_REGS; 17159 + u32 clobbered_regs_mask; 17160 + struct call_summary cs; 17170 17161 u32 expected_regs_mask; 17171 - bool can_be_inlined = false; 17172 17162 s16 off; 17173 17163 int i; 17174 17164 17175 - if (bpf_helper_call(call)) { 17176 - if (get_helper_proto(env, call->imm, &fn) < 0) 17177 - /* error would be reported later */ 17178 - return; 17179 - clobbered_regs_mask = helper_fastcall_clobber_mask(fn); 17180 - can_be_inlined = fn->allow_fastcall && 17181 - (verifier_inlines_helper_call(env, call->imm) || 17182 - bpf_jit_inlines_helper_call(call->imm)); 17183 - } 17184 - 17185 - if (bpf_pseudo_kfunc_call(call)) { 17186 - struct bpf_kfunc_call_arg_meta meta; 17187 - int err; 17188 - 17189 - err = fetch_kfunc_meta(env, call, &meta, NULL); 17190 - if (err < 0) 17191 - /* error would be reported later */ 17192 - return; 17193 - 17194 - clobbered_regs_mask = kfunc_fastcall_clobber_mask(&meta); 17195 - can_be_inlined = is_fastcall_kfunc_call(&meta); 17196 - } 17197 - 17198 - if (clobbered_regs_mask == ALL_CALLER_SAVED_REGS) 17165 + if (!get_call_summary(env, call, &cs)) 17199 17166 return; 17200 17167 17168 + /* A bitmask specifying which caller saved registers are clobbered 17169 + * by a call to a helper/kfunc *as if* this helper/kfunc follows 17170 + * bpf_fastcall contract: 17171 + * - includes R0 if function is non-void; 17172 + * - 
includes R1-R5 if corresponding parameter has is described 17173 + * in the function prototype. 17174 + */ 17175 + clobbered_regs_mask = GENMASK(cs.num_params, cs.is_void ? 1 : 0); 17201 17176 /* e.g. if helper call clobbers r{0,1}, expect r{2,3,4,5} in the pattern */ 17202 17177 expected_regs_mask = ~clobbered_regs_mask & ALL_CALLER_SAVED_REGS; 17203 17178 ··· 17239 17246 * don't set 'fastcall_spills_num' for call B so that remove_fastcall_spills_fills() 17240 17247 * does not remove spill/fill pair {4,6}. 17241 17248 */ 17242 - if (can_be_inlined) 17249 + if (cs.fastcall) 17243 17250 env->insn_aux_data[insn_idx].fastcall_spills_num = i - 1; 17244 17251 else 17245 17252 subprog->keep_fastcall_stack = 1;