/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
/* size of tmp_str_buf in struct bpf_verifier_env.
 * we need at least 306 bytes to fit full stack mask representation
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN		320
/* Patch buffer size */
#define INSN_BUF_SIZE		32

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
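/* Illustrative sketch (not from the kernel source) of how the marks
 * above compose along a parent/child chain of verifier states; the
 * register choice is hypothetical:
 *
 *   state A:            r1 = 0     ; A marks r1 REG_LIVE_WRITTEN
 *   state B (child):    r2 = r1    ; read of r1 propagates REG_LIVE_READ64
 *                                  ; up the parent chain, but stops at A
 *                                  ; because A holds the write mark for r1
 *
 * A write-screened register need not match in states_equal(), which is
 * what makes this bookkeeping pay off for pruning.
 */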
#define ITER_PREFIX "bpf_iter_"

enum bpf_iter_state {
	BPF_ITER_STATE_INVALID, /* for non-first slot */
	BPF_ITER_STATE_ACTIVE,
	BPF_ITER_STATE_DRAINED,
};

struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	/*
	 * Fixed part of pointer offset, pointer types only.
	 * Or constant delta between "linked" scalars with the same ID.
	 */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish map lookups from outer map
			 * the map_uid is non-zero for registers
			 * pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* packing following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 */
	/*
	 * Upper bit of ID is used to remember relationship between "linked"
	 * registers. Example:
	 *   r1 = r2;  both will have r1->id == r2->id == N
	 *   r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
	 */
#define BPF_ADD_CONST (1U << 31)
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should be invalidated also. In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier's perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
	 * while the other points to the caller's stack. To differentiate them,
	 * 'frameno' is used: an index into the bpf_verifier_state->frame[]
	 * array pointing to a bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which only happens after main verification finishes.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};
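/* Illustrative sketch (not from the kernel source) of the id-based
 * NULL-marking described for the 'id' field above; register names are
 * hypothetical:
 *
 *   r0 = bpf_map_lookup_elem(...);  // PTR_TO_MAP_VALUE_OR_NULL, id == N
 *   r6 = r0;                        // the copy shares id == N
 *   if (r0 != 0) {
 *       // the NULL check on r0 upgrades *every* register with id == N,
 *       // so r6 is also PTR_TO_MAP_VALUE inside this branch
 *       val = *(u64 *)(r6 + 0);
 *   }
 */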
enum bpf_stack_slot_type {
	STACK_INVALID,	/* nothing was stored in this stack slot */
	STACK_SPILL,	/* register spilled into stack */
	STACK_MISC,	/* BPF program wrote some data into this slot */
	STACK_ZERO,	/* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS	(BPF_DYNPTR_SIZE / BPF_REG_SIZE)

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
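/* Illustrative sketch (not from the kernel source): after a program
 * spills r6 with "*(u64 *)(r10 - 8) = r6", the slot covering bytes
 * [r10-8 .. r10-1] looks roughly like:
 *
 *   stack[0].slot_type  = { STACK_SPILL x 8 };        // all 8 bytes
 *   stack[0].spilled_ptr = <copy of r6's bpf_reg_state>;
 *
 * A narrower write would instead mark the written bytes STACK_MISC,
 * and storing a known zero marks them STACK_ZERO.
 */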
struct bpf_reference_state {
	/* Each reference object has a type. Ensure REF_TYPE_PTR is zero to
	 * default to pointer reference on zero initialization of a state.
	 */
	enum ref_state_type {
		REF_TYPE_PTR = 0,
		REF_TYPE_LOCK,
	} type;
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* Used to keep track of the source object of a lock, to ensure
	 * that it matches on unlock.
	 */
	void *ptr;
};

struct bpf_retval_range {
	s32 minval;
	s32 maxval;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	struct bpf_retval_range callback_ret_range;
	bool in_callback_fn;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;
	/* For callback calling functions that limit number of possible
	 * callback executions (e.g. bpf_loop) this keeps track of current
	 * simulated iteration number.
	 * Value in frame N refers to number of times callback with frame
	 * N+1 was simulated, e.g. for the following call:
	 *
	 * bpf_loop(..., fn, ...); | suppose current frame is N
	 *                         | fn would be simulated in frame N+1
	 *                         | number of simulations is tracked in frame N
	 */
	u32 callback_depth;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	int active_locks;
	struct bpf_reference_state *refs;
	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
	 * (i.e. 8) bytes worth of stack memory.
	 * stack[0] represents bytes [*(r10-8)..*(r10-1)]
	 * stack[1] represents bytes [*(r10-16)..*(r10-9)]
	 * ...
	 * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
	 */
	struct bpf_stack_state *stack;
	/* Size of the current stack, in bytes. The stack state is tracked above,
	 * in `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
	 */
	int allocated_stack;
};
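/* Illustrative sketch (not from the kernel source): mapping a negative
 * frame-pointer offset to a stack-slot index (spi) in the stack[]
 * layout documented above. The helper name is hypothetical; the
 * verifier does this arithmetic inline:
 *
 *   static inline int example_off_to_spi(int off)
 *   {
 *           return (-off - 1) / BPF_REG_SIZE;  // off=-8 -> 0, off=-16 -> 1
 *   }
 */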
#define MAX_CALL_FRAMES 8

/* instruction history flags, used in bpf_insn_hist_entry.flags field */
enum {
	/* instruction references stack slot through PTR_TO_STACK register;
	 * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
	 * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
	 * 8 bytes per slot, so slot index (spi) is [0, 63])
	 */
	INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */

	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */

	INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
};

static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);

struct bpf_insn_hist_entry {
	u32 idx;
	/* insn idx can't be bigger than 1 million */
	u32 prev_idx : 22;
	/* special flags, e.g., whether insn is doing register stack spill/load */
	u32 flags : 10;
	/* additional registers that need precision tracking when this
	 * jump is backtracked, vector of six 10-bit records
	 */
	u64 linked_regs;
};
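/* Illustrative sketch (not from the kernel source): how a stack access
 * would be packed into the 10-bit flags field above; the values are
 * hypothetical:
 *
 *   u32 frameno = 2;                  // fits in INSN_F_FRAMENO_MASK
 *   u32 spi = 17;                     // fits in INSN_F_SPI_MASK
 *   u32 flags = frameno |
 *               (spi << INSN_F_SPI_SHIFT) |
 *               INSN_F_STACK_ACCESS;  // bit 9 marks a stack access
 *
 *   // decoding:
 *   //   frameno = flags & INSN_F_FRAMENO_MASK;
 *   //   spi = (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
 */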
/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 * One is fallthrough branch with branches==1 and another
	 * state is pushed into stack (to be explored later) also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() the do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such state is exactly equal to the current state
	 * it's an infinite loop. Note states_equal() checks for states
	 * equivalency, so two states being 'states_equal' does not mean
	 * infinite loop. The exact comparison is provided by the
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops or
	 * loop iteration count may be too high.
	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	bool speculative;
	bool active_rcu_lock;
	u32 active_preempt_lock;
	/* If this state was ever pointed-to by another state's loop_entry field
	 * this flag would be set to true. Used to avoid freeing such states
	 * while they are still in use.
	 */
	bool used_as_loop_entry;
	bool in_sleepable;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* If this state is a part of a states loop this field points to some
	 * parent of this state such that:
	 * - it is also a member of the same states loop;
	 * - DFS states traversal starting from initial state visits loop_entry
	 *   state before this state.
	 * Used to compute topmost loop entry for state loops.
	 * State loops might appear because of open coded iterators logic.
	 * See get_loop_entry() for more information.
	 */
	struct bpf_verifier_state *loop_entry;
	/* Sub-range of env->insn_hist[] corresponding to this state's
	 * instruction history.
	 * Backtracking is using it to go from last to first.
	 * For most states instruction history is short, 0-3 instructions.
	 * For loops it can go up to ~40.
	 */
	u32 insn_hist_start;
	u32 insn_hist_end;
	u32 dfs_depth;
	u32 callback_unroll_depth;
	u32 may_goto_depth;
};

#define bpf_get_spilled_reg(slot, frame, mask)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg, mask)		\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask);	\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame, mask))

#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr)	\
	({								\
		struct bpf_verifier_state *___vstate = __vst;		\
		int ___i, ___j;						\
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {	\
			struct bpf_reg_state *___regs;			\
			__state = ___vstate->frame[___i];		\
			___regs = __state->regs;			\
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {	\
				__reg = &___regs[___j];			\
				(void)(__expr);				\
			}						\
			bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
				if (!__reg)				\
					continue;			\
				(void)(__expr);				\
			}						\
		}							\
	})

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
	bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
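/* Illustrative sketch (not from the kernel source): typical use of the
 * iteration macro above, e.g. invalidating every register and spilled
 * register that still refers to a released reference. The surrounding
 * code and mark_reg_invalid() call are hypothetical stand-ins for what
 * kernel/bpf/verifier.c does after bpf_sk_release():
 *
 *   struct bpf_func_state *state;
 *   struct bpf_reg_state *reg;
 *
 *   bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
 *           if (reg->ref_obj_id == released_id)
 *                   mark_reg_invalid(env, reg);
 *   }));
 */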
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* pointer and state for maps */
struct bpf_map_ptr_state {
	struct bpf_map *map_ptr;
	bool poison;
	bool unpriv;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	(1U << 0)
#define BPF_ALU_SANITIZE_DST	(1U << 1)
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_IMMEDIATE	(1U << 4)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		struct bpf_map_ptr_state map_ptr_state;
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop this field tracks
		 * the state of the relevant registers to make decision about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of node field within type to rewrite */
		u64 insert_off;
	};
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
	bool zext_dst; /* this insn zero extends dst reg */
	bool needs_zext; /* alu op needs to clear upper bits */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */
	/* true if STX or LDX instruction is a part of a spill/fill
	 * pattern for a bpf_fastcall call.
	 */
	u8 fastcall_pattern:1;
	/* for CALL instructions, a number of spill/fill pairs in the
	 * bpf_fastcall pattern.
	 */
	u8 fastcall_spills_num:3;

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool jmp_point;
	bool prune_point;
	/* ensure we check state equivalence and save a state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	bool force_checkpoint;
	/* true if instruction is a call to a helper function that
	 * accepts callback function as a parameter.
	 */
	bool calls_callback;
};
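/* Illustrative sketch (not from the kernel source) of what alu_limit
 * and the BPF_ALU_SANITIZE* bits above feed into. For a pointer ALU
 * insn like "r1 += r2", instruction patching inserts a masking
 * sequence against Spectre v1 so that a speculatively out-of-range r2
 * contributes 0 (roughly; see sanitize_ptr_alu() and do_misc_fixups()
 * in kernel/bpf/verifier.c):
 *
 *   rAX = alu_limit      // per-insn bound derived from r2's range
 *   rAX -= r2
 *   rAX |= r2            // sign bit set iff r2 < 0 or r2 > alu_limit
 *   rAX = -rAX
 *   rAX s>>= 63          // all-ones if in range, all-zeros otherwise
 *   rAX &= r2            // offset actually applied to the pointer
 *   r1 += rAX
 */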
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives the number of useful bytes
	 * in the user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_FIXED	8
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}
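/* Worked example (illustrative, not from the kernel source) of the
 * rotating "log window" arithmetic documented above, with hypothetical
 * numbers. Given a user buffer of len_total = 1024 bytes and 5000
 * bytes of log output produced so far (no BPF_LOG_FIXED):
 *
 *   end_pos   = 5000;                 // total bytes ever produced
 *   start_pos = end_pos - len_total;  // 3976: oldest byte still kept
 *
 * i.e. only the newest 1024 bytes survive in the user buffer, and
 * userspace can detect truncation by start_pos != 0.
 */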
#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_arg_info {
	enum bpf_arg_type arg_type;
	union {
		u32 mem_size;
		u32 btf_id;
	};
};

enum priv_stack_mode {
	PRIV_STACK_UNKNOWN,
	NO_PRIV_STACK,
	PRIV_STACK_ADAPTIVE,
};

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	u16 stack_extra;
	/* offsets in range [stack_depth .. fastcall_stack_off)
	 * are used for bpf_fastcall spills and fills.
	 */
	s16 fastcall_stack_off;
	bool has_tail_call: 1;
	bool tail_call_reachable: 1;
	bool has_ld_abs: 1;
	bool is_cb: 1;
	bool is_async_cb: 1;
	bool is_exception_cb: 1;
	bool args_cached: 1;
	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
	bool keep_fastcall_stack: 1;
	bool changes_pkt_data: 1;

	enum priv_stack_mode priv_stack_mode;
	u8 arg_cnt;
	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 count;
	u32 ids[BPF_ID_MAP_SIZE];
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct module *attach_btf_mod;	/* The owner module of prog->aux->attach_btf */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	bool test_reg_invariants;	/* fail verification on register invariants violations */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	u32 hidden_subprog_cnt;		/* number of hidden subprogs */
	int exception_callback_subprog;
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	/* Allow access to uninitialized stack memory. Writes with fixed offset are
	 * always allowed, so this refers to reads (with fixed or variable offset),
	 * to writes with variable offset and to indirect (helper) accesses.
	 */
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	struct backtrack_state bt;
	struct bpf_insn_hist_entry *insn_hist;
	struct bpf_insn_hist_entry *cur_hist_ent;
	u32 insn_hist_cap;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* Some states are freed during program analysis; this is the peak
	 * number of states, which dominates kernel memory consumption
	 * during verification.
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to temporarily hold constants as scalar registers */
	struct bpf_reg_state fake_reg[2];
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
	struct bpf_insn insn_buf[INSN_BUF_SIZE];
	struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
};

static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
{
	return &env->prog->aux->func_info_aux[subprog];
}

static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
{
	return &env->subprog_info[subprog];
}

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
				  u32 insn_off,
				  const char *prefix_fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
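/* Worked example (illustrative, not from the kernel source) of the key
 * layout produced by bpf_trampoline_compute_key() above; the numbers
 * are hypothetical:
 *
 *   with tgt_prog:    prog id 7, btf_id 1234
 *       key = (7ULL << 32) | 1234              == 0x00000007000004D2
 *   without tgt_prog: btf obj id 3, btf_id 1234
 *       key = (3ULL << 32) | 0x80000000 | 1234 == 0x00000003800004D2
 *
 * Bit 31 thus distinguishes the two cases, which is why
 * bpf_trampoline_unpack_key() masks it off with 0x7FFFFFFF.
 */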
int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
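/* Illustrative sketch (not from the kernel source): how the two
 * accessors above split an extended type. E.g. a maybe-NULL map value
 * pointer is composed as PTR_TO_MAP_VALUE | PTR_MAYBE_NULL, so:
 *
 *   u32 t = PTR_TO_MAP_VALUE | PTR_MAYBE_NULL;
 *
 *   base_type(t) == PTR_TO_MAP_VALUE;  // low BPF_BASE_TYPE_BITS bits
 *   type_flag(t) == PTR_MAYBE_NULL;    // modifier flags above them
 *   type_may_be_null(t);               // true; see helper below
 */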
/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
		prog->aux->saved_dst_prog_type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
		return prog->aux->jits_use_priv_stack;
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

static inline bool type_is_ptr_alloc_obj(u32 type)
{
	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}

static inline bool type_is_non_owning_ref(u32 type)
{
	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}

static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static inline bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static inline bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}

/* Used for printing the entire verifier state. */
static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
{
#ifdef __BIG_ENDIAN
	off -= spill_size - fill_size;
#endif

	return !(off % BPF_REG_SIZE);
}

const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);

void print_verifier_state(struct bpf_verifier_env *env,
			  const struct bpf_func_state *state, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);

#endif /* _LINUX_BPF_VERIFIER_H */