Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: jmp_offset() and verbose_insn() utility functions

Extract two utility functions:
- One BPF jump instruction (BPF_JMP32 | BPF_JA) uses the .imm field to
encode its jump offset, while the rest use .off. Encapsulate this
detail in a jmp_offset() function.
- Avoid duplicating instruction printing callback definitions by
defining a verbose_insn() function, which disassembles an
instruction into the verifier log while hiding this detail.

These functions will be used in the next patch.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250304195024.2478889-2-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Eduard Zingerman, committed by Alexei Starovoitov.
80ca3f1d 5bde5750

+23 -17
kernel/bpf/verifier.c
··· 3360 3360 return 0; 3361 3361 } 3362 3362 3363 + static int jmp_offset(struct bpf_insn *insn) 3364 + { 3365 + u8 code = insn->code; 3366 + 3367 + if (code == (BPF_JMP32 | BPF_JA)) 3368 + return insn->imm; 3369 + return insn->off; 3370 + } 3371 + 3363 3372 static int check_subprogs(struct bpf_verifier_env *env) 3364 3373 { 3365 3374 int i, subprog_start, subprog_end, off, cur_subprog = 0; ··· 3395 3386 goto next; 3396 3387 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 3397 3388 goto next; 3398 - if (code == (BPF_JMP32 | BPF_JA)) 3399 - off = i + insn[i].imm + 1; 3400 - else 3401 - off = i + insn[i].off + 1; 3389 + off = i + jmp_offset(&insn[i]) + 1; 3402 3390 if (off < subprog_start || off >= subprog_end) { 3403 3391 verbose(env, "jump out of range from insn %d to %d\n", i, off); 3404 3392 return -EINVAL; ··· 3925 3919 return btf_name_by_offset(desc_btf, func->name_off); 3926 3920 } 3927 3921 3922 + static void verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn) 3923 + { 3924 + const struct bpf_insn_cbs cbs = { 3925 + .cb_call = disasm_kfunc_name, 3926 + .cb_print = verbose, 3927 + .private_data = env, 3928 + }; 3929 + 3930 + print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 3931 + } 3932 + 3928 3933 static inline void bt_init(struct backtrack_state *bt, u32 frame) 3929 3934 { 3930 3935 bt->frame = frame; ··· 4136 4119 static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, 4137 4120 struct bpf_insn_hist_entry *hist, struct backtrack_state *bt) 4138 4121 { 4139 - const struct bpf_insn_cbs cbs = { 4140 - .cb_call = disasm_kfunc_name, 4141 - .cb_print = verbose, 4142 - .private_data = env, 4143 - }; 4144 4122 struct bpf_insn *insn = env->prog->insnsi + idx; 4145 4123 u8 class = BPF_CLASS(insn->code); 4146 4124 u8 opcode = BPF_OP(insn->code); ··· 4153 4141 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); 4154 4142 verbose(env, "stack=%s before ", env->tmp_str_buf); 4155 4143 verbose(env, "%d: 
", idx); 4156 - print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 4144 + verbose_insn(env, insn); 4157 4145 } 4158 4146 4159 4147 /* If there is a history record that some registers gained range at this insn, ··· 19285 19273 } 19286 19274 19287 19275 if (env->log.level & BPF_LOG_LEVEL) { 19288 - const struct bpf_insn_cbs cbs = { 19289 - .cb_call = disasm_kfunc_name, 19290 - .cb_print = verbose, 19291 - .private_data = env, 19292 - }; 19293 - 19294 19276 if (verifier_state_scratched(env)) 19295 19277 print_insn_state(env, state, state->curframe); 19296 19278 19297 19279 verbose_linfo(env, env->insn_idx, "; "); 19298 19280 env->prev_log_pos = env->log.end_pos; 19299 19281 verbose(env, "%d: ", env->insn_idx); 19300 - print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 19282 + verbose_insn(env, insn); 19301 19283 env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; 19302 19284 env->prev_log_pos = env->log.end_pos; 19303 19285 }