/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
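
/* A minimal sketch of how these marks combine on a short program (register
 * numbers are arbitrary; this example is illustrative, not from verifier.c):
 *
 * 1: r1 = 5			// r1 gets REG_LIVE_WRITTEN in this state
 * 2: if r2 > 7 goto 4		// two child states are explored
 * 3: r3 = r1			// full-width read: mark_reg_read() sets
 *				// REG_LIVE_READ64 and walks the parentage
 *				// chain upwards, stopping where the write
 *				// mark from insn 1 screens the read off
 * 4: exit
 *
 * Had insn 3 been the 32-bit "w3 = w1", only REG_LIVE_READ32 would be set,
 * letting states_equal() ignore the upper 32 bits of r1 in ancestor states.
 */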

struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	union {
		/* valid when type == PTR_TO_PACKET */
		u16 range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;

		u32 btf_id; /* for PTR_TO_BTF_ID */

		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

		/* Max size from any of the above. */
		unsigned long raw;
	};
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should be invalidated as well. In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * so that the verifier can reset all regs whose
	 * ref_obj_id matches sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier's perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
	 * while the other points to the caller's stack. To differentiate them,
	 * 'frameno' is used, which is an index into the
	 * bpf_verifier_state->frame[] array pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which only happens after main verification has finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};
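
/* A minimal sketch of how var_off and the ranges track a scalar together
 * (illustrative values). After "r1 &= 0xff" on an unknown 64-bit value:
 *
 *	var_off    = { .value = 0x0, .mask = 0xff }	// low 8 bits unknown
 *	umin_value = 0, umax_value = 255
 *	smin_value = 0, smax_value = 255	// 32-bit bounds mirror this
 *
 * A following "r1 += 1" yields umin_value = 1, umax_value = 256, and
 * tnum_add() widens var_off to { .value = 0x0, .mask = 0x1ff } because the
 * carry can reach bit 8. When such a scalar is added to a map value
 * pointer, it is umax_value (bounded by BPF_MAX_VAR_OFF) that the access
 * check compares against the map's value_size.
 */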

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_stack_depth
	 * zero == main subprog
	 */
	u32 subprogno;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * The 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit.
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 *     One is a fallthrough branch with branches==1 and another
	 *     state is pushed into the stack (to be explored later) also with
	 *     branches==1. The parent of this state has branches==1.
	 *     The verifier state tree connected via the 'parent' pointer
	 *     looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack(), do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such a state is exactly equal to the current
	 * state it's an infinite loop. Note that states_equal() checks for
	 * state equivalence, so two states being 'states_equal' does not mean
	 * an infinite loop. The exact comparison is provided by the
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high.
	 * In such cases the BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;
	u32 active_spin_lock;
	bool speculative;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking uses it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops it can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
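
/* Typical usage sketch, mirroring the pattern used by
 * release_reg_references() in kernel/bpf/verifier.c (the surrounding names
 * are context-dependent). The NULL check is required because non-spill
 * slots yield NULL:
 *
 *	struct bpf_reg_state *reg;
 *	int i;
 *
 *	bpf_for_each_spilled_reg(i, state, reg) {
 *		if (!reg)
 *			continue;
 *		if (reg->ref_obj_id == ref_obj_id)
 *			__mark_reg_unknown(env, reg);
 *	}
 */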

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	1U
#define BPF_ALU_SANITIZE_DST	2U
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
	};
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, may be 0 */
	int sanitize_stack_off; /* stack slot to be cleared */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool zext_dst; /* this insn zero extends dst reg */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool prune_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return (log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
		log->level == BPF_LOG_KERNEL;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
};
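
/* Why 'start' must stay first: find_subprog() in kernel/bpf/verifier.c
 * bsearch()es a bare insn index against this array, and the comparator
 * casts both pointers to struct bpf_subprog_info, reading only ->start,
 * roughly:
 *
 *	static int cmp_subprogs(const void *a, const void *b)
 *	{
 *		return ((struct bpf_subprog_info *)a)->start -
 *		       ((struct bpf_subprog_info *)b)->start;
 *	}
 *
 * The search key handed to bsearch() points at a plain insn index, so the
 * cast is only valid while 'start' sits at offset 0.
 */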

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool allow_ptr_leaks;
	bool allow_ptr_to_map_access;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is the peak number of states; this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
};
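
/* Sketch of the pointer chain from the env to the registers of the frame
 * currently being verified; the cur_func() and cur_regs() helpers below
 * encapsulate exactly this walk:
 *
 *	struct bpf_verifier_state *cur = env->cur_state;
 *	struct bpf_func_state *frame = cur->frame[cur->curframe];
 *	struct bpf_reg_state *regs = frame->regs;	// regs[0..MAX_BPF_REG-1]
 */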

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ctx_reg(struct bpf_verifier_env *env,
		  const struct bpf_reg_state *reg, int regno);

#endif /* _LINUX_BPF_VERIFIER_H */