/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/stddef.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>
#include <linux/cfi.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;
struct ftrace_ops;
struct cgroup;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
extern bool bpf_global_ma_set;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
typedef unsigned int (*bpf_func_t)(const void *,
				   const struct bpf_insn *);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
				const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	long (*map_delete_elem)(struct bpf_map *map, void *key);
	long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	long (*map_pop_elem)(struct bpf_map *map, void *value);
	long (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	/* If need_defer is true, the implementation should guarantee that
	 * the to-be-put element is still alive before the bpf program, which
	 * may manipulate it, exits.
	 */
	void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers.*/
	long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map. It is a runtime check to ensure
	 * an inner map can be inserted to an outer map.
	 *
	 * Some properties of the inner map have been used during the
	 * verification time. When inserting an inner map at the runtime,
	 * map_meta_equal has to ensure the inserting map has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);


	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	long (*map_for_each_callback)(struct bpf_map *map,
				      bpf_callback_t callback_fn,
				      void *callback_ctx, u64 flags);

	u64 (*map_mem_usage)(const struct bpf_map *map);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
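
/* Editor's illustration (not part of the original header): a map
 * implementation provides one of these ops tables and points the verifier at
 * its value struct's BTF id. This is a hedged, minimal sketch with
 * hypothetical my_map_* callbacks and a hypothetical struct my_map; real
 * in-tree examples live under kernel/bpf/ (e.g. the array and hash maps).
 *
 *	BTF_ID_LIST_SINGLE(my_map_btf_ids, struct, my_map)
 *
 *	const struct bpf_map_ops my_map_ops = {
 *		.map_meta_equal   = bpf_map_meta_equal,
 *		.map_alloc_check  = my_map_alloc_check,
 *		.map_alloc        = my_map_alloc,
 *		.map_free         = my_map_free,
 *		.map_get_next_key = my_map_get_next_key,
 *		.map_lookup_elem  = my_map_lookup_elem,
 *		.map_update_elem  = my_map_update_elem,
 *		.map_delete_elem  = my_map_delete_elem,
 *		.map_mem_usage    = my_map_mem_usage,
 *		.map_btf_id       = &my_map_btf_ids[0],
 *	};
 */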

enum {
	/* Support at most 10 fields in a BTF type */
	BTF_FIELDS_MAX = 10,
};

enum btf_field_type {
	BPF_SPIN_LOCK   = (1 << 0),
	BPF_TIMER       = (1 << 1),
	BPF_KPTR_UNREF  = (1 << 2),
	BPF_KPTR_REF    = (1 << 3),
	BPF_KPTR_PERCPU = (1 << 4),
	BPF_KPTR        = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
	BPF_LIST_HEAD   = (1 << 5),
	BPF_LIST_NODE   = (1 << 6),
	BPF_RB_ROOT     = (1 << 7),
	BPF_RB_NODE     = (1 << 8),
	BPF_GRAPH_NODE  = BPF_RB_NODE | BPF_LIST_NODE,
	BPF_GRAPH_ROOT  = BPF_RB_ROOT | BPF_LIST_HEAD,
	BPF_REFCOUNT    = (1 << 9),
};

typedef void (*btf_dtor_kfunc_t)(void *);

struct btf_field_kptr {
	struct btf *btf;
	struct module *module;
	/* dtor used if btf_is_kernel(btf), otherwise the type is
	 * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
	 */
	btf_dtor_kfunc_t dtor;
	u32 btf_id;
};

struct btf_field_graph_root {
	struct btf *btf;
	u32 value_btf_id;
	u32 node_offset;
	struct btf_record *value_rec;
};

struct btf_field {
	u32 offset;
	u32 size;
	enum btf_field_type type;
	union {
		struct btf_field_kptr kptr;
		struct btf_field_graph_root graph_root;
	};
};

struct btf_record {
	u32 cnt;
	u32 field_mask;
	int spin_lock_off;
	int timer_off;
	int refcount_off;
	struct btf_field fields[];
};

/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
struct bpf_rb_node_kern {
	struct rb_node rb_node;
	void *owner;
} __attribute__((aligned(8)));

/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
struct bpf_list_node_kern {
	struct list_head list_head;
	void *owner;
} __attribute__((aligned(8)));

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	u32 id;
	struct btf_record *record;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup *objcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	/* rcu is used before freeing and work is only used during freeing */
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	struct mutex freeze_mutex;
	atomic64_t writecnt;
	/* 'Ownership' of program-containing map is claimed by the first program
	 * that is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have the
	 * same prog type, JITed flag and xdp_has_frags flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	bool free_after_mult_rcu_gp;
	bool free_after_rcu_gp;
	atomic64_t sleepable_refcnt;
	s64 __percpu *elem_count;
};

static inline const char *btf_field_type_name(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return "bpf_spin_lock";
	case BPF_TIMER:
		return "bpf_timer";
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
		return "kptr";
	case BPF_KPTR_PERCPU:
		return "percpu_kptr";
	case BPF_LIST_HEAD:
		return "bpf_list_head";
	case BPF_LIST_NODE:
		return "bpf_list_node";
	case BPF_RB_ROOT:
		return "bpf_rb_root";
	case BPF_RB_NODE:
		return "bpf_rb_node";
	case BPF_REFCOUNT:
		return "bpf_refcount";
	default:
		WARN_ON_ONCE(1);
		return "unknown";
	}
}

static inline u32 btf_field_type_size(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return sizeof(struct bpf_spin_lock);
	case BPF_TIMER:
		return sizeof(struct bpf_timer);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
		return sizeof(u64);
	case BPF_LIST_HEAD:
		return sizeof(struct bpf_list_head);
	case BPF_LIST_NODE:
		return sizeof(struct bpf_list_node);
	case BPF_RB_ROOT:
		return sizeof(struct bpf_rb_root);
	case BPF_RB_NODE:
		return sizeof(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return sizeof(struct bpf_refcount);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline u32 btf_field_type_align(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return __alignof__(struct bpf_spin_lock);
	case BPF_TIMER:
		return __alignof__(struct bpf_timer);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
		return __alignof__(u64);
	case BPF_LIST_HEAD:
		return __alignof__(struct bpf_list_head);
	case BPF_LIST_NODE:
		return __alignof__(struct bpf_list_node);
	case BPF_RB_ROOT:
		return __alignof__(struct bpf_rb_root);
	case BPF_RB_NODE:
		return __alignof__(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return __alignof__(struct bpf_refcount);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
{
	memset(addr, 0, field->size);

	switch (field->type) {
	case BPF_REFCOUNT:
		refcount_set((refcount_t *)addr, 1);
		break;
	case BPF_RB_NODE:
		RB_CLEAR_NODE((struct rb_node *)addr);
		break;
	case BPF_LIST_HEAD:
	case BPF_LIST_NODE:
		INIT_LIST_HEAD((struct list_head *)addr);
		break;
	case BPF_RB_ROOT:
		/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
	case BPF_SPIN_LOCK:
	case BPF_TIMER:
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}
}

static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
{
	if (IS_ERR_OR_NULL(rec))
		return false;
	return rec->field_mask & type;
}

static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++)
		bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
}

/* 'dst' must be a temporary buffer and should not point to memory that is being
 * used in parallel by a bpf program or bpf syscall, otherwise the access from
 * the bpf program or bpf syscall may be corrupted by the reinitialization,
 * leading to weird problems. Even if 'dst' is newly-allocated from the bpf memory
 * allocator, it is still possible for 'dst' to be used in parallel by a bpf
 * program or bpf syscall.
 */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_init(map->record, dst);
}

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		data_race(*ldst++ = *lsrc++);
}

/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
static inline void bpf_obj_memcpy(struct btf_record *rec,
				  void *dst, void *src, u32 size,
				  bool long_memcpy)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		if (long_memcpy)
			bpf_long_memcpy(dst, src, round_up(size, 8));
		else
			memcpy(dst, src, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memcpy(dst + curr_off, src + curr_off, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memcpy(dst + curr_off, src + curr_off, size - curr_off);
}

static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
}

static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
}

static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		memset(dst, 0, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memset(dst + curr_off, 0, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memset(dst + curr_off, 0, size - curr_off);
}

static inline void zero_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_memzero(map->record, dst, map->value_size);
}
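
/* Editor's illustration (not part of the original header): the helpers above
 * exist so that map code never copies or zeroes the special BTF-managed
 * fields (spin locks, timers, kptrs, graph nodes) as raw bytes. A hedged
 * sketch of how a hypothetical map's update path might prepare a reused
 * element, with my_map_get_free_slot()/my_map_publish() as made-up stand-ins
 * for the map's own slot management:
 *
 *	static long my_map_update_elem(struct bpf_map *map, void *key,
 *				       void *value, u64 flags)
 *	{
 *		void *elem = my_map_get_free_slot(map, key);	// hypothetical
 *
 *		if (!elem)
 *			return -E2BIG;
 *		check_and_init_map_value(map, elem);	// re-init special fields
 *		copy_map_value(map, elem, value);	// copy everything else
 *		return my_map_publish(map, key, elem);	// hypothetical
 *	}
 */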

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock);


int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory is read-only. We classify types into two categories: base types
 * and extended types. Extended types are base types combined with a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM points to BPF ring buffer reservation. */
	MEM_RINGBUF		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptr loaded from map value using a load
	 * instruction, so that they can only be dereferenced but not escape the
	 * BPF program into the kernel (i.e. cannot be passed as arguments to
	 * kfunc or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to memory local to the bpf program. */
	DYNPTR_TYPE_LOCAL	= BIT(8 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to a kernel-produced ringbuf record. */
	DYNPTR_TYPE_RINGBUF	= BIT(9 + BPF_BASE_TYPE_BITS),

	/* Size is known at compile time. */
	MEM_FIXED_SIZE		= BIT(10 + BPF_BASE_TYPE_BITS),

	/* MEM is of an allocated object of type in program BTF. This is used to
	 * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
	 */
	MEM_ALLOC		= BIT(11 + BPF_BASE_TYPE_BITS),

	/* PTR was passed from the kernel in a trusted context, and may be
	 * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
	 * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
	 * PTR_UNTRUSTED refers to a kptr that was read directly from a map
	 * without invoking bpf_kptr_xchg(). What we really need to know is
	 * whether a pointer is safe to pass to a kfunc or BPF helper function.
	 * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
	 * helpers, they do not cover all possible instances of unsafe
	 * pointers. For example, a pointer that was obtained from walking a
	 * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
	 * fact that it may be NULL, invalid, etc. This is due to backwards
	 * compatibility requirements, as this was the behavior that was first
	 * introduced when kptrs were added. The behavior is now considered
	 * deprecated, and PTR_UNTRUSTED will eventually be removed.
	 *
	 * PTR_TRUSTED, on the other hand, is a pointer that the kernel
	 * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
	 * For example, pointers passed to tracepoint arguments are considered
	 * PTR_TRUSTED, as are pointers that are passed to struct_ops
	 * callbacks. As alluded to above, pointers that are obtained from
	 * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
	 * struct task_struct *task is PTR_TRUSTED, then accessing
	 * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
	 * in a BPF register. Similarly, pointers passed to certain program
	 * types such as kretprobes are not guaranteed to be valid, as they may
	 * for example contain an object that was recently freed.
	 */
	PTR_TRUSTED		= BIT(12 + BPF_BASE_TYPE_BITS),

	/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),

	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
	 * Currently only valid for linked-list and rbtree nodes. If the nodes
	 * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
	 */
	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to sk_buff */
	DYNPTR_TYPE_SKB		= BIT(15 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to xdp_buff */
	DYNPTR_TYPE_XDP		= BIT(16 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
};

#define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
				 | DYNPTR_TYPE_XDP)

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
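
/* Editor's note (illustration, not part of the original header): an extended
 * type is just a base type in the low BPF_BASE_TYPE_BITS bits with zero or
 * more bpf_type_flag bits OR-ed on top, e.g. PTR_TO_MAP_VALUE_OR_NULL below
 * is defined as PTR_MAYBE_NULL | PTR_TO_MAP_VALUE. The two halves can be
 * split again by plain masking (the verifier has base_type()/type_flag()
 * helpers for this):
 *
 *	u32 type  = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE;
 *	u32 base  = type & (BPF_BASE_TYPE_LIMIT - 1);	// == PTR_TO_MAP_VALUE
 *	u32 flags = type & ~(BPF_BASE_TYPE_LIMIT - 1);	// == PTR_MAYBE_NULL
 */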

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* Used to prototype bpf_memcmp() and other functions that access data
	 * on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_RINGBUF_MEM,	/* pointer to dynamically reserved ringbuf memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_PTR_TO_KPTR,	/* pointer to referenced kptr */
	ARG_PTR_TO_DYNPTR,	/* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
	/* pointer to memory does not need to be initialized, helper function must fill
	 * all bytes or clear them in error case.
	 */
	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | ARG_PTR_TO_MEM,
	/* Pointer to valid memory of size known at compile time. */
	ARG_PTR_TO_FIXED_SIZE_MEM	= MEM_FIXED_SIZE | ARG_PTR_TO_MEM,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_MEM,			/* returns a pointer to memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_RINGBUF_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
	RET_PTR_TO_DYNPTR_MEM_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
	RET_PTR_TO_BTF_ID_TRUSTED	= PTR_TRUSTED	 | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	bool might_sleep;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
		struct {
			size_t arg1_size;
			size_t arg2_size;
			size_t arg3_size;
			size_t arg4_size;
			size_t arg5_size;
		};
		size_t arg_size[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
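
/* Editor's illustration (not part of the original header): a helper describes
 * its signature to the verifier through one of these. The sketch below
 * mirrors how the bpf_map_lookup_elem helper is typically described (the
 * in-tree definitions live in kernel/bpf/helpers.c); treat the exact flag
 * values as an approximation rather than a verbatim quote:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */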

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	CONST_PTR_TO_DYNPTR,	 /* reg points to a const struct bpf_dynptr */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static bool bpf_is_ldimm64(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_reg_state;
struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct bpf_reg_state *reg,
				 int off, int size);
};
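
/* Editor's illustration (not part of the original header): a hedged, minimal
 * bpf_verifier_ops sketch for a hypothetical program type whose context is a
 * single read-only u64 field at offset 0. The my_* names are assumptions for
 * illustration; real implementations typically also provide
 * convert_ctx_access() and friends as needed.
 *
 *	static bool my_is_valid_access(int off, int size,
 *				       enum bpf_access_type type,
 *				       const struct bpf_prog *prog,
 *				       struct bpf_insn_access_aux *info)
 *	{
 *		if (type != BPF_READ)
 *			return false;
 *		return off == 0 && size == sizeof(u64);
 *	}
 *
 *	static const struct bpf_verifier_ops my_verifier_ops = {
 *		.get_func_proto  = my_get_func_proto,	// hypothetical
 *		.is_valid_access = my_is_valid_access,
 *	};
 */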

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

/* The argument is a structure. */
#define BTF_FMODEL_STRUCT_ARG		BIT(0)

/* The argument is signed. */
#define BTF_FMODEL_SIGNED_ARG		BIT(1)

struct btf_func_model {
	u8 ret_size;
	u8 ret_flags;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
	u8 arg_flags[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Get original function from stack instead of from provided direct address.
 * Makes sense for trampolines with fexit or fmod_ret programs.
 */
#define BPF_TRAMP_F_ORIG_STACK		BIT(5)

/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
 * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
 */
#define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)

/* Indicate that current trampoline is in a tail call context. Then, it has to
 * cache and restore tail_call_cnt to avoid infinite tail call loop.
 */
#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)

/*
 * Indicate the trampoline should be suitable to receive indirect calls;
 * without this indirectly calling the generated code can result in #UD/#CP,
 * depending on the CFI options.
 *
 * Used by bpf_struct_ops.
 *
 * Incompatible with FENTRY usage, overloads @func_addr argument.
 */
#define BPF_TRAMP_F_INDIRECT		BIT(8)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.
 */
enum {
#if defined(__s390x__)
	BPF_MAX_TRAMP_LINKS = 27,
#else
	BPF_MAX_TRAMP_LINKS = 38,
#endif
};

struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
	int nr_links;
};

struct bpf_tramp_run_ctx;

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *func_addr);
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
void arch_protect_bpf_trampoline(void *image, unsigned int size);
void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr);

u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
				      struct bpf_tramp_run_ctx *run_ctx);
typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
				      struct bpf_tramp_run_ctx *run_ctx);
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
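
/* Editor's illustration (not part of the original header): the arch hooks
 * above are used together whenever a trampoline image is (re)built. This is a
 * hedged sketch of the generic flow only; it ignores error paths and the
 * struct bpf_tramp_image bookkeeping that the real trampoline code performs:
 *
 *	int size, err;
 *	void *image;
 *
 *	size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
 *	if (size < 0)
 *		return size;
 *	image = arch_alloc_bpf_trampoline(size);
 *	if (!image)
 *		return -ENOMEM;
 *	err = arch_prepare_bpf_trampoline(im, image, image + size,
 *					  m, flags, tlinks, func_addr);
 *	if (err >= 0)
 *		arch_protect_bpf_trampoline(image, size);	// make RO + executable
 *	else
 *		arch_free_bpf_trampoline(image, size);
 */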

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	int size;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	struct ftrace_ops *fops;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u32 flags;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	struct module *tgt_mod;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	void *rw_image;
	u32 image_off;
	struct bpf_ksym ksym;
#ifdef CONFIG_HAVE_STATIC_CALL
	struct static_call_key *sc_key;
	void *sc_tramp;
#endif
};

#ifndef __bpfcall
#define __bpfcall __nocfi
#endif

static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	bpf_func_t bpf_func)
{
	return bpf_func(ctx, insnsi);
}

/* the implementation of the opaque uapi struct bpf_dynptr */
struct bpf_dynptr_kern {
	void *data;
	/* Size represents the number of usable bytes of dynptr data.
	 * If for example the offset is at 4 for a local dynptr whose data is
	 * of type u64, the number of usable bytes is 4.
	 *
	 * The upper 8 bits are reserved. It is as follows:
	 * Bits 0 - 23 = size
	 * Bits 24 - 30 = dynptr type
	 * Bit 31 = whether dynptr is read-only
	 */
	u32 size;
	u32 offset;
} __aligned(8);

enum bpf_dynptr_type {
	BPF_DYNPTR_TYPE_INVALID,
	/* Points to memory that is local to the bpf program */
	BPF_DYNPTR_TYPE_LOCAL,
	/* Underlying data is a ringbuf record */
	BPF_DYNPTR_TYPE_RINGBUF,
	/* Underlying data is a sk_buff */
	BPF_DYNPTR_TYPE_SKB,
	/* Underlying data is a xdp_buff */
	BPF_DYNPTR_TYPE_XDP,
};

int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
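
/* Editor's illustration (not part of the original header): going by the
 * layout documented in struct bpf_dynptr_kern above, the packed 'size' word
 * could be decoded as below. The masks and shifts here are derived from that
 * comment, not taken from the implementation; in-tree code should go through
 * accessors such as __bpf_dynptr_size() instead of open-coding this.
 *
 *	u32 word = ptr->size;
 *	u32 nr_bytes = word & ((1U << 24) - 1);			// bits 0-23
 *	enum bpf_dynptr_type type = (word >> 24) & 0x7f;	// bits 24-30
 *	bool rdonly = word & (1U << 31);			// bit 31
 */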

#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);

/*
 * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
 * indirection with a direct call to the bpf program. If the architecture does
 * not have STATIC_CALL, avoid a double-indirection.
 */
#ifdef CONFIG_HAVE_STATIC_CALL

#define __BPF_DISPATCHER_SC_INIT(_name)				\
	.sc_key = &STATIC_CALL_KEY(_name),			\
	.sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),

#define __BPF_DISPATCHER_SC(name)				\
	DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)

#define __BPF_DISPATCHER_CALL(name)				\
	static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)

#define __BPF_DISPATCHER_UPDATE(_d, _new)			\
	__static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))

#else
#define __BPF_DISPATCHER_SC_INIT(name)
#define __BPF_DISPATCHER_SC(name)
#define __BPF_DISPATCHER_CALL(name)	bpf_func(ctx, insnsi)
#define __BPF_DISPATCHER_UPDATE(_d, _new)
#endif

#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
	__BPF_DISPATCHER_SC_INIT(_name##_call)			\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	__BPF_DISPATCHER_SC(name);					\
	noinline __bpfcall unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func)					\
	{								\
		return __BPF_DISPATCHER_CALL(name);			\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);

#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func);					\
	extern struct bpf_dispatcher bpf_dispatcher_##name;

#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
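
/* Editor's illustration (not part of the original header): a schematic of how
 * a subsystem might wire up a dispatcher, using a hypothetical name
 * "my_dispatcher" (the XDP fast path is the in-tree user of this machinery):
 *
 *	DECLARE_BPF_DISPATCHER(my_dispatcher)	// in a header
 *	DEFINE_BPF_DISPATCHER(my_dispatcher)	// in exactly one .c file
 *
 *	// run a program through the dispatcher trampoline:
 *	ret = BPF_DISPATCHER_FUNC(my_dispatcher)(ctx, prog->insnsi,
 *						 prog->bpf_func);
 *
 *	// retarget the direct call when the "hot" program changes:
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_dispatcher),
 *				   old_prog, new_prog);
 */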

/* Called only from JIT-enabled code, so there's no need for stubs. */
void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
	bool called : 1;
	bool verified : 1;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool dev_bound; /* Program is bound to the netdev. */
	bool offload_requested; /* Program is bound and offloaded to the netdev. */
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool attach_tracing_prog; /* true if tracing another tracing program */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	bool exception_cb;
	bool exception_boundary;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
#ifdef CONFIG_FINEIBT
	struct bpf_ksym ksym_prefix;
#endif
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	int cgroup_atype; /* enum cgroup_bpf_attach_type */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
	u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	struct module *mod;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_prog {
	u16 pages;		/* Number of allocated pages */
	u16 jited:1,		/* Is our filter JIT'ed? */
	    jit_requested:1,	/* archs need to JIT the prog */
	    gpl_compatible:1,	/* Is filter GPL compatible? */
	    cb_access:1,	/* Is control block accessed? */
	    dst_needed:1,	/* Do we need dst entry? */
	    blinding_requested:1, /* needs constant blinding */
	    blinded:1,		/* Was blinded */
	    is_func:1,		/* program is a bpf function */
	    kprobe_override:1,	/* Do we override a kprobe? */
	    has_callchain_buf:1, /* callchain buffer allocated? */
	    enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
	    call_get_stack:1,	/* Do we call bpf_get_stack() or bpf_get_stackid() */
	    call_get_func_ip:1,	/* Do we call get_func_ip() */
	    tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
	enum bpf_prog_type type;	/* Type of BPF program */
	enum bpf_attach_type expected_attach_type; /* For some prog types */
	u32 len;		/* Number of filter blocks */
	u32 jited_len;		/* Size of jited insns in bytes */
	u8 tag[BPF_TAG_SIZE];
	struct bpf_prog_stats __percpu *stats;
	int __percpu *active;
	unsigned int (*bpf_func)(const void *ctx,
				 const struct bpf_insn *insn);
	struct bpf_prog_aux *aux;	/* Auxiliary fields */
	struct sock_fprog_kern *orig_prog;	/* Original BPF program */
	/* Instructions for interpreter */
	union {
		DECLARE_FLEX_ARRAY(struct sock_filter, insns);
		DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
	};
};
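
/* Editor's note (illustration, not part of the original header): bpf_func is
 * the JITed (or interpreter) entry point and insnsi the translated
 * instruction array, so at the lowest level running a program boils down to
 * something like the call below. In-tree callers go through the
 * bpf_prog_run*() and dispatcher wrappers rather than doing this directly:
 *
 *	unsigned int ret = prog->bpf_func(ctx, prog->insnsi);
 */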

struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
	int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
			  struct bpf_map *old_map);
};

struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;
	u64 cookie;
};

struct bpf_shim_tramp_link {
	struct bpf_tramp_link link;
	struct bpf_trampoline *trampoline;
};

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
/**
 * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
 *			   define a BPF_MAP_TYPE_STRUCT_OPS map type composed
 *			   of BPF_PROG_TYPE_STRUCT_OPS progs.
 * @verifier_ops: A structure of callbacks that are invoked by the verifier
 *		  when determining whether the struct_ops progs in the
 *		  struct_ops map are valid.
 * @init: A callback that is invoked a single time, and before any other
 *	  callback, to initialize the structure. A nonzero return value means
 *	  the subsystem could not be initialized.
 * @check_member: When defined, a callback invoked by the verifier to allow
 *		  the subsystem to determine if an entry in the struct_ops map
 *		  is valid. A nonzero return value means that the map is
 *		  invalid and should be rejected by the verifier.
 * @init_member: A callback that is invoked for each member of the struct_ops
 *		 map to allow the subsystem to initialize the member. A nonzero
 *		 value means the member could not be initialized. This callback
 *		 is exclusive with the @type, @type_id, @value_type, and
 *		 @value_id fields.
 * @reg: A callback that is invoked when the struct_ops map has been
 *	 initialized and is being attached to. Zero means the struct_ops map
 *	 has been successfully registered and is live. A nonzero return value
 *	 means the struct_ops map could not be registered.
 * @unreg: A callback that is invoked when the struct_ops map should be
 *	   unregistered.
 * @update: A callback that is invoked when the live struct_ops map is being
 *	    updated to contain new values. This callback is only invoked when
 *	    the struct_ops map is loaded with BPF_F_LINK. If not defined, it is
 *	    assumed that the struct_ops map cannot be updated.
 * @validate: A callback that is invoked after all of the members have been
 *	      initialized. This callback should perform static checks on the
 *	      map, meaning that it should either fail or succeed
 *	      deterministically. A struct_ops map that has been validated may
 *	      not necessarily succeed in being registered if the call to @reg
 *	      fails. For example, a valid struct_ops map may be loaded, but
 *	      then fail to be registered due to there being another active
 *	      struct_ops map on the system in the subsystem already. For this
 *	      reason, if this callback is not defined, the check is skipped as
 *	      the struct_ops map will have final verification performed in
 *	      @reg.
 * @type: BTF type.
 * @value_type: Value type.
 * @name: The name of the struct bpf_struct_ops object.
 * @func_models: Function models of the struct_ops members.
 * @type_id: BTF type id.
 * @value_id: BTF value id.
 */
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member,
			    const struct bpf_prog *prog);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	int (*update)(void *kdata, void *old_kdata);
	int (*validate)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
	void *cfi_stubs;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
int bpf_struct_ops_link_create(union bpf_attr *attr);

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
	int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

#endif

#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
				    int cgroup_atype);
void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
						  int cgroup_atype)
{
	return -EOPNOTSUPP;
}
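/* Illustrative sketch (not something defined in this header): roughly how a
 * subsystem might wire up the callbacks of struct bpf_struct_ops documented
 * above. All identifiers below (bpf_testmod_ops, testmod_*,
 * testmod_verifier_ops) are hypothetical, and the step that makes the
 * filled-in struct bpf_struct_ops known to the BPF core is omitted here,
 * since it is subsystem- and version-specific. Each member of the ops table
 * (here handle_event) would be backed by a BPF_PROG_TYPE_STRUCT_OPS program
 * supplied through a BPF_MAP_TYPE_STRUCT_OPS map.
 *
 *	struct bpf_testmod_ops {
 *		int (*handle_event)(int cpu);
 *	};
 *
 *	static int testmod_init(struct btf *btf)
 *	{
 *		return 0;		// one-time setup, 0 on success
 *	}
 *
 *	static int testmod_reg(void *kdata)
 *	{
 *		// make the ops table in @kdata live for the subsystem
 *		return 0;
 *	}
 *
 *	static void testmod_unreg(void *kdata)
 *	{
 *		// detach the ops table in @kdata
 *	}
 *
 *	static struct bpf_struct_ops bpf_bpf_testmod_ops = {
 *		.verifier_ops	= &testmod_verifier_ops,	// hypothetical
 *		.init		= testmod_init,
 *		.reg		= testmod_reg,
 *		.unreg		= testmod_unreg,
 *		.name		= "bpf_testmod_ops",
 *	};
 */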
1770static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog) 1771{ 1772} 1773#endif 1774 1775struct bpf_array { 1776 struct bpf_map map; 1777 u32 elem_size; 1778 u32 index_mask; 1779 struct bpf_array_aux *aux; 1780 union { 1781 DECLARE_FLEX_ARRAY(char, value) __aligned(8); 1782 DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8); 1783 DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8); 1784 }; 1785}; 1786 1787#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ 1788#define MAX_TAIL_CALL_CNT 33 1789 1790/* Maximum number of loops for bpf_loop and bpf_iter_num. 1791 * It's enum to expose it (and thus make it discoverable) through BTF. 1792 */ 1793enum { 1794 BPF_MAX_LOOPS = 8 * 1024 * 1024, 1795}; 1796 1797#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ 1798 BPF_F_RDONLY_PROG | \ 1799 BPF_F_WRONLY | \ 1800 BPF_F_WRONLY_PROG) 1801 1802#define BPF_MAP_CAN_READ BIT(0) 1803#define BPF_MAP_CAN_WRITE BIT(1) 1804 1805/* Maximum number of user-producer ring buffer samples that can be drained in 1806 * a call to bpf_user_ringbuf_drain(). 1807 */ 1808#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024) 1809 1810static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) 1811{ 1812 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 1813 1814 /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is 1815 * not possible. 1816 */ 1817 if (access_flags & BPF_F_RDONLY_PROG) 1818 return BPF_MAP_CAN_READ; 1819 else if (access_flags & BPF_F_WRONLY_PROG) 1820 return BPF_MAP_CAN_WRITE; 1821 else 1822 return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; 1823} 1824 1825static inline bool bpf_map_flags_access_ok(u32 access_flags) 1826{ 1827 return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != 1828 (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 1829} 1830 1831struct bpf_event_entry { 1832 struct perf_event *event; 1833 struct file *perf_file; 1834 struct file *map_file; 1835 struct rcu_head rcu; 1836}; 1837 1838static inline bool map_type_contains_progs(struct bpf_map *map) 1839{ 1840 return map->map_type == BPF_MAP_TYPE_PROG_ARRAY || 1841 map->map_type == BPF_MAP_TYPE_DEVMAP || 1842 map->map_type == BPF_MAP_TYPE_CPUMAP; 1843} 1844 1845bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp); 1846int bpf_prog_calc_tag(struct bpf_prog *fp); 1847 1848const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 1849const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void); 1850 1851typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, 1852 unsigned long off, unsigned long len); 1853typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, 1854 const struct bpf_insn *src, 1855 struct bpf_insn *dst, 1856 struct bpf_prog *prog, 1857 u32 *target_size); 1858 1859u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 1860 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); 1861 1862/* an array of programs to be executed under rcu_lock. 1863 * 1864 * Typical usage: 1865 * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run); 1866 * 1867 * the structure returned by bpf_prog_array_alloc() should be populated 1868 * with program pointers and the last pointer must be NULL. 1869 * The user has to keep refcnt on the program and make sure the program 1870 * is removed from the array before bpf_prog_put(). 1871 * The 'struct bpf_prog_array *' should only be replaced with xchg() 1872 * since other cpus are walking the array of pointers in parallel. 
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use the single global 'bpf_empty_prog_array'.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0); that pointer should still be
 * 'freed' by bpf_prog_array_free().
 */
extern struct bpf_empty_prog_array bpf_empty_prog_array;

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
	int retval;
};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
	bool is_uprobe;
};

struct bpf_tramp_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
	struct bpf_run_ctx *saved_run_ctx;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
#endif
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
	current->bpf_ctx = old_ctx;
#endif
}

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
/* BPF program asks to set CN on the packet.
*/ 1964#define BPF_RET_SET_CN (1 << 0) 1965 1966typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); 1967 1968static __always_inline u32 1969bpf_prog_run_array(const struct bpf_prog_array *array, 1970 const void *ctx, bpf_prog_run_fn run_prog) 1971{ 1972 const struct bpf_prog_array_item *item; 1973 const struct bpf_prog *prog; 1974 struct bpf_run_ctx *old_run_ctx; 1975 struct bpf_trace_run_ctx run_ctx; 1976 u32 ret = 1; 1977 1978 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held"); 1979 1980 if (unlikely(!array)) 1981 return ret; 1982 1983 run_ctx.is_uprobe = false; 1984 1985 migrate_disable(); 1986 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 1987 item = &array->items[0]; 1988 while ((prog = READ_ONCE(item->prog))) { 1989 run_ctx.bpf_cookie = item->bpf_cookie; 1990 ret &= run_prog(prog, ctx); 1991 item++; 1992 } 1993 bpf_reset_run_ctx(old_run_ctx); 1994 migrate_enable(); 1995 return ret; 1996} 1997 1998/* Notes on RCU design for bpf_prog_arrays containing sleepable programs: 1999 * 2000 * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array 2001 * overall. As a result, we must use the bpf_prog_array_free_sleepable 2002 * in order to use the tasks_trace rcu grace period. 2003 * 2004 * When a non-sleepable program is inside the array, we take the rcu read 2005 * section and disable preemption for that program alone, so it can access 2006 * rcu-protected dynamically sized maps. 2007 */ 2008static __always_inline u32 2009bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu, 2010 const void *ctx, bpf_prog_run_fn run_prog) 2011{ 2012 const struct bpf_prog_array_item *item; 2013 const struct bpf_prog *prog; 2014 const struct bpf_prog_array *array; 2015 struct bpf_run_ctx *old_run_ctx; 2016 struct bpf_trace_run_ctx run_ctx; 2017 u32 ret = 1; 2018 2019 might_fault(); 2020 2021 rcu_read_lock_trace(); 2022 migrate_disable(); 2023 2024 run_ctx.is_uprobe = true; 2025 2026 array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held()); 2027 if (unlikely(!array)) 2028 goto out; 2029 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 2030 item = &array->items[0]; 2031 while ((prog = READ_ONCE(item->prog))) { 2032 if (!prog->aux->sleepable) 2033 rcu_read_lock(); 2034 2035 run_ctx.bpf_cookie = item->bpf_cookie; 2036 ret &= run_prog(prog, ctx); 2037 item++; 2038 2039 if (!prog->aux->sleepable) 2040 rcu_read_unlock(); 2041 } 2042 bpf_reset_run_ctx(old_run_ctx); 2043out: 2044 migrate_enable(); 2045 rcu_read_unlock_trace(); 2046 return ret; 2047} 2048 2049#ifdef CONFIG_BPF_SYSCALL 2050DECLARE_PER_CPU(int, bpf_prog_active); 2051extern struct mutex bpf_stats_enabled_mutex; 2052 2053/* 2054 * Block execution of BPF programs attached to instrumentation (perf, 2055 * kprobes, tracepoints) to prevent deadlocks on map operations as any of 2056 * these events can happen inside a region which holds a map bucket lock 2057 * and can deadlock on it. 
2058 */ 2059static inline void bpf_disable_instrumentation(void) 2060{ 2061 migrate_disable(); 2062 this_cpu_inc(bpf_prog_active); 2063} 2064 2065static inline void bpf_enable_instrumentation(void) 2066{ 2067 this_cpu_dec(bpf_prog_active); 2068 migrate_enable(); 2069} 2070 2071extern const struct file_operations bpf_map_fops; 2072extern const struct file_operations bpf_prog_fops; 2073extern const struct file_operations bpf_iter_fops; 2074 2075#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 2076 extern const struct bpf_prog_ops _name ## _prog_ops; \ 2077 extern const struct bpf_verifier_ops _name ## _verifier_ops; 2078#define BPF_MAP_TYPE(_id, _ops) \ 2079 extern const struct bpf_map_ops _ops; 2080#define BPF_LINK_TYPE(_id, _name) 2081#include <linux/bpf_types.h> 2082#undef BPF_PROG_TYPE 2083#undef BPF_MAP_TYPE 2084#undef BPF_LINK_TYPE 2085 2086extern const struct bpf_prog_ops bpf_offload_prog_ops; 2087extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; 2088extern const struct bpf_verifier_ops xdp_analyzer_ops; 2089 2090struct bpf_prog *bpf_prog_get(u32 ufd); 2091struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2092 bool attach_drv); 2093void bpf_prog_add(struct bpf_prog *prog, int i); 2094void bpf_prog_sub(struct bpf_prog *prog, int i); 2095void bpf_prog_inc(struct bpf_prog *prog); 2096struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); 2097void bpf_prog_put(struct bpf_prog *prog); 2098 2099void bpf_prog_free_id(struct bpf_prog *prog); 2100void bpf_map_free_id(struct bpf_map *map); 2101 2102struct btf_field *btf_record_find(const struct btf_record *rec, 2103 u32 offset, u32 field_mask); 2104void btf_record_free(struct btf_record *rec); 2105void bpf_map_free_record(struct bpf_map *map); 2106struct btf_record *btf_record_dup(const struct btf_record *rec); 2107bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); 2108void bpf_obj_free_timer(const struct btf_record *rec, void *obj); 2109void bpf_obj_free_fields(const struct btf_record *rec, void *obj); 2110void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); 2111 2112struct bpf_map *bpf_map_get(u32 ufd); 2113struct bpf_map *bpf_map_get_with_uref(u32 ufd); 2114struct bpf_map *__bpf_map_get(struct fd f); 2115void bpf_map_inc(struct bpf_map *map); 2116void bpf_map_inc_with_uref(struct bpf_map *map); 2117struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref); 2118struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); 2119void bpf_map_put_with_uref(struct bpf_map *map); 2120void bpf_map_put(struct bpf_map *map); 2121void *bpf_map_area_alloc(u64 size, int numa_node); 2122void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); 2123void bpf_map_area_free(void *base); 2124bool bpf_map_write_active(const struct bpf_map *map); 2125void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 2126int generic_map_lookup_batch(struct bpf_map *map, 2127 const union bpf_attr *attr, 2128 union bpf_attr __user *uattr); 2129int generic_map_update_batch(struct bpf_map *map, struct file *map_file, 2130 const union bpf_attr *attr, 2131 union bpf_attr __user *uattr); 2132int generic_map_delete_batch(struct bpf_map *map, 2133 const union bpf_attr *attr, 2134 union bpf_attr __user *uattr); 2135struct bpf_map *bpf_map_get_curr_or_next(u32 *id); 2136struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); 2137 2138#ifdef CONFIG_MEMCG_KMEM 2139void *bpf_map_kmalloc_node(const struct bpf_map 
*map, size_t size, gfp_t flags, 2140 int node); 2141void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags); 2142void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, 2143 gfp_t flags); 2144void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, 2145 size_t align, gfp_t flags); 2146#else 2147static inline void * 2148bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 2149 int node) 2150{ 2151 return kmalloc_node(size, flags, node); 2152} 2153 2154static inline void * 2155bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) 2156{ 2157 return kzalloc(size, flags); 2158} 2159 2160static inline void * 2161bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags) 2162{ 2163 return kvcalloc(n, size, flags); 2164} 2165 2166static inline void __percpu * 2167bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, 2168 gfp_t flags) 2169{ 2170 return __alloc_percpu_gfp(size, align, flags); 2171} 2172#endif 2173 2174static inline int 2175bpf_map_init_elem_count(struct bpf_map *map) 2176{ 2177 size_t size = sizeof(*map->elem_count), align = size; 2178 gfp_t flags = GFP_USER | __GFP_NOWARN; 2179 2180 map->elem_count = bpf_map_alloc_percpu(map, size, align, flags); 2181 if (!map->elem_count) 2182 return -ENOMEM; 2183 2184 return 0; 2185} 2186 2187static inline void 2188bpf_map_free_elem_count(struct bpf_map *map) 2189{ 2190 free_percpu(map->elem_count); 2191} 2192 2193static inline void bpf_map_inc_elem_count(struct bpf_map *map) 2194{ 2195 this_cpu_inc(*map->elem_count); 2196} 2197 2198static inline void bpf_map_dec_elem_count(struct bpf_map *map) 2199{ 2200 this_cpu_dec(*map->elem_count); 2201} 2202 2203extern int sysctl_unprivileged_bpf_disabled; 2204 2205static inline bool bpf_allow_ptr_leaks(void) 2206{ 2207 return perfmon_capable(); 2208} 2209 2210static inline bool bpf_allow_uninit_stack(void) 2211{ 2212 return perfmon_capable(); 2213} 2214 2215static inline bool bpf_bypass_spec_v1(void) 2216{ 2217 return cpu_mitigations_off() || perfmon_capable(); 2218} 2219 2220static inline bool bpf_bypass_spec_v4(void) 2221{ 2222 return cpu_mitigations_off() || perfmon_capable(); 2223} 2224 2225int bpf_map_new_fd(struct bpf_map *map, int flags); 2226int bpf_prog_new_fd(struct bpf_prog *prog); 2227 2228void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2229 const struct bpf_link_ops *ops, struct bpf_prog *prog); 2230int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); 2231int bpf_link_settle(struct bpf_link_primer *primer); 2232void bpf_link_cleanup(struct bpf_link_primer *primer); 2233void bpf_link_inc(struct bpf_link *link); 2234void bpf_link_put(struct bpf_link *link); 2235int bpf_link_new_fd(struct bpf_link *link); 2236struct bpf_link *bpf_link_get_from_fd(u32 ufd); 2237struct bpf_link *bpf_link_get_curr_or_next(u32 *id); 2238 2239int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname); 2240int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags); 2241 2242#define BPF_ITER_FUNC_PREFIX "bpf_iter_" 2243#define DEFINE_BPF_ITER_FUNC(target, args...) \ 2244 extern int bpf_iter_ ## target(args); \ 2245 int __init bpf_iter_ ## target(args) { return 0; } 2246 2247/* 2248 * The task type of iterators. 2249 * 2250 * For BPF task iterators, they can be parameterized with various 2251 * parameters to visit only some of tasks. 2252 * 2253 * BPF_TASK_ITER_ALL (default) 2254 * Iterate over resources of every task. 
2255 * 2256 * BPF_TASK_ITER_TID 2257 * Iterate over resources of a task/tid. 2258 * 2259 * BPF_TASK_ITER_TGID 2260 * Iterate over resources of every task of a process / task group. 2261 */ 2262enum bpf_iter_task_type { 2263 BPF_TASK_ITER_ALL = 0, 2264 BPF_TASK_ITER_TID, 2265 BPF_TASK_ITER_TGID, 2266}; 2267 2268struct bpf_iter_aux_info { 2269 /* for map_elem iter */ 2270 struct bpf_map *map; 2271 2272 /* for cgroup iter */ 2273 struct { 2274 struct cgroup *start; /* starting cgroup */ 2275 enum bpf_cgroup_iter_order order; 2276 } cgroup; 2277 struct { 2278 enum bpf_iter_task_type type; 2279 u32 pid; 2280 } task; 2281}; 2282 2283typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, 2284 union bpf_iter_link_info *linfo, 2285 struct bpf_iter_aux_info *aux); 2286typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); 2287typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, 2288 struct seq_file *seq); 2289typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, 2290 struct bpf_link_info *info); 2291typedef const struct bpf_func_proto * 2292(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, 2293 const struct bpf_prog *prog); 2294 2295enum bpf_iter_feature { 2296 BPF_ITER_RESCHED = BIT(0), 2297}; 2298 2299#define BPF_ITER_CTX_ARG_MAX 2 2300struct bpf_iter_reg { 2301 const char *target; 2302 bpf_iter_attach_target_t attach_target; 2303 bpf_iter_detach_target_t detach_target; 2304 bpf_iter_show_fdinfo_t show_fdinfo; 2305 bpf_iter_fill_link_info_t fill_link_info; 2306 bpf_iter_get_func_proto_t get_func_proto; 2307 u32 ctx_arg_info_size; 2308 u32 feature; 2309 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; 2310 const struct bpf_iter_seq_info *seq_info; 2311}; 2312 2313struct bpf_iter_meta { 2314 __bpf_md_ptr(struct seq_file *, seq); 2315 u64 session_id; 2316 u64 seq_num; 2317}; 2318 2319struct bpf_iter__bpf_map_elem { 2320 __bpf_md_ptr(struct bpf_iter_meta *, meta); 2321 __bpf_md_ptr(struct bpf_map *, map); 2322 __bpf_md_ptr(void *, key); 2323 __bpf_md_ptr(void *, value); 2324}; 2325 2326int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); 2327void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); 2328bool bpf_iter_prog_supported(struct bpf_prog *prog); 2329const struct bpf_func_proto * 2330bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); 2331int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); 2332int bpf_iter_new_fd(struct bpf_link *link); 2333bool bpf_link_is_iter(struct bpf_link *link); 2334struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); 2335int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); 2336void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, 2337 struct seq_file *seq); 2338int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, 2339 struct bpf_link_info *info); 2340 2341int map_set_for_each_callback_args(struct bpf_verifier_env *env, 2342 struct bpf_func_state *caller, 2343 struct bpf_func_state *callee); 2344 2345int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); 2346int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); 2347int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, 2348 u64 flags); 2349int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, 2350 u64 flags); 2351 2352int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); 2353 2354int 
bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 2355 void *key, void *value, u64 map_flags); 2356int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 2357int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, 2358 void *key, void *value, u64 map_flags); 2359int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 2360 2361int bpf_get_file_flag(int flags); 2362int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, 2363 size_t actual_size); 2364 2365/* verify correctness of eBPF program */ 2366int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size); 2367 2368#ifndef CONFIG_BPF_JIT_ALWAYS_ON 2369void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); 2370#endif 2371 2372struct btf *bpf_get_btf_vmlinux(void); 2373 2374/* Map specifics */ 2375struct xdp_frame; 2376struct sk_buff; 2377struct bpf_dtab_netdev; 2378struct bpf_cpu_map_entry; 2379 2380void __dev_flush(void); 2381int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, 2382 struct net_device *dev_rx); 2383int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, 2384 struct net_device *dev_rx); 2385int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, 2386 struct bpf_map *map, bool exclude_ingress); 2387int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, 2388 struct bpf_prog *xdp_prog); 2389int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, 2390 struct bpf_prog *xdp_prog, struct bpf_map *map, 2391 bool exclude_ingress); 2392 2393void __cpu_map_flush(void); 2394int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, 2395 struct net_device *dev_rx); 2396int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, 2397 struct sk_buff *skb); 2398 2399/* Return map's numa specified by userspace */ 2400static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) 2401{ 2402 return (attr->map_flags & BPF_F_NUMA_NODE) ? 
2403 attr->numa_node : NUMA_NO_NODE; 2404} 2405 2406struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); 2407int array_map_alloc_check(union bpf_attr *attr); 2408 2409int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, 2410 union bpf_attr __user *uattr); 2411int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, 2412 union bpf_attr __user *uattr); 2413int bpf_prog_test_run_tracing(struct bpf_prog *prog, 2414 const union bpf_attr *kattr, 2415 union bpf_attr __user *uattr); 2416int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 2417 const union bpf_attr *kattr, 2418 union bpf_attr __user *uattr); 2419int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, 2420 const union bpf_attr *kattr, 2421 union bpf_attr __user *uattr); 2422int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, 2423 const union bpf_attr *kattr, 2424 union bpf_attr __user *uattr); 2425int bpf_prog_test_run_nf(struct bpf_prog *prog, 2426 const union bpf_attr *kattr, 2427 union bpf_attr __user *uattr); 2428bool btf_ctx_access(int off, int size, enum bpf_access_type type, 2429 const struct bpf_prog *prog, 2430 struct bpf_insn_access_aux *info); 2431 2432static inline bool bpf_tracing_ctx_access(int off, int size, 2433 enum bpf_access_type type) 2434{ 2435 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 2436 return false; 2437 if (type != BPF_READ) 2438 return false; 2439 if (off % size != 0) 2440 return false; 2441 return true; 2442} 2443 2444static inline bool bpf_tracing_btf_ctx_access(int off, int size, 2445 enum bpf_access_type type, 2446 const struct bpf_prog *prog, 2447 struct bpf_insn_access_aux *info) 2448{ 2449 if (!bpf_tracing_ctx_access(off, size, type)) 2450 return false; 2451 return btf_ctx_access(off, size, type, prog, info); 2452} 2453 2454int btf_struct_access(struct bpf_verifier_log *log, 2455 const struct bpf_reg_state *reg, 2456 int off, int size, enum bpf_access_type atype, 2457 u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name); 2458bool btf_struct_ids_match(struct bpf_verifier_log *log, 2459 const struct btf *btf, u32 id, int off, 2460 const struct btf *need_btf, u32 need_type_id, 2461 bool strict); 2462 2463int btf_distill_func_proto(struct bpf_verifier_log *log, 2464 struct btf *btf, 2465 const struct btf_type *func_proto, 2466 const char *func_name, 2467 struct btf_func_model *m); 2468 2469struct bpf_reg_state; 2470int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog); 2471int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, 2472 struct btf *btf, const struct btf_type *t); 2473const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt, 2474 int comp_idx, const char *tag_key); 2475 2476struct bpf_prog *bpf_prog_by_id(u32 id); 2477struct bpf_link *bpf_link_by_id(u32 id); 2478 2479const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); 2480void bpf_task_storage_free(struct task_struct *task); 2481void bpf_cgrp_storage_free(struct cgroup *cgroup); 2482bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); 2483const struct btf_func_model * 2484bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 2485 const struct bpf_insn *insn); 2486int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, 2487 u16 btf_fd_idx, u8 **func_addr); 2488 2489struct bpf_core_ctx { 2490 struct bpf_verifier_log *log; 2491 const struct btf *btf; 2492}; 2493 2494bool btf_nested_type_is_trusted(struct bpf_verifier_log 
*log, 2495 const struct bpf_reg_state *reg, 2496 const char *field_name, u32 btf_id, const char *suffix); 2497 2498bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, 2499 const struct btf *reg_btf, u32 reg_id, 2500 const struct btf *arg_btf, u32 arg_id); 2501 2502int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, 2503 int relo_idx, void *insn); 2504 2505static inline bool unprivileged_ebpf_enabled(void) 2506{ 2507 return !sysctl_unprivileged_bpf_disabled; 2508} 2509 2510/* Not all bpf prog type has the bpf_ctx. 2511 * For the bpf prog type that has initialized the bpf_ctx, 2512 * this function can be used to decide if a kernel function 2513 * is called by a bpf program. 2514 */ 2515static inline bool has_current_bpf_ctx(void) 2516{ 2517 return !!current->bpf_ctx; 2518} 2519 2520void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog); 2521 2522void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, 2523 enum bpf_dynptr_type type, u32 offset, u32 size); 2524void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); 2525void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); 2526 2527bool dev_check_flush(void); 2528bool cpu_map_check_flush(void); 2529#else /* !CONFIG_BPF_SYSCALL */ 2530static inline struct bpf_prog *bpf_prog_get(u32 ufd) 2531{ 2532 return ERR_PTR(-EOPNOTSUPP); 2533} 2534 2535static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, 2536 enum bpf_prog_type type, 2537 bool attach_drv) 2538{ 2539 return ERR_PTR(-EOPNOTSUPP); 2540} 2541 2542static inline void bpf_prog_add(struct bpf_prog *prog, int i) 2543{ 2544} 2545 2546static inline void bpf_prog_sub(struct bpf_prog *prog, int i) 2547{ 2548} 2549 2550static inline void bpf_prog_put(struct bpf_prog *prog) 2551{ 2552} 2553 2554static inline void bpf_prog_inc(struct bpf_prog *prog) 2555{ 2556} 2557 2558static inline struct bpf_prog *__must_check 2559bpf_prog_inc_not_zero(struct bpf_prog *prog) 2560{ 2561 return ERR_PTR(-EOPNOTSUPP); 2562} 2563 2564static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2565 const struct bpf_link_ops *ops, 2566 struct bpf_prog *prog) 2567{ 2568} 2569 2570static inline int bpf_link_prime(struct bpf_link *link, 2571 struct bpf_link_primer *primer) 2572{ 2573 return -EOPNOTSUPP; 2574} 2575 2576static inline int bpf_link_settle(struct bpf_link_primer *primer) 2577{ 2578 return -EOPNOTSUPP; 2579} 2580 2581static inline void bpf_link_cleanup(struct bpf_link_primer *primer) 2582{ 2583} 2584 2585static inline void bpf_link_inc(struct bpf_link *link) 2586{ 2587} 2588 2589static inline void bpf_link_put(struct bpf_link *link) 2590{ 2591} 2592 2593static inline int bpf_obj_get_user(const char __user *pathname, int flags) 2594{ 2595 return -EOPNOTSUPP; 2596} 2597 2598static inline void __dev_flush(void) 2599{ 2600} 2601 2602struct xdp_frame; 2603struct bpf_dtab_netdev; 2604struct bpf_cpu_map_entry; 2605 2606static inline 2607int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, 2608 struct net_device *dev_rx) 2609{ 2610 return 0; 2611} 2612 2613static inline 2614int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, 2615 struct net_device *dev_rx) 2616{ 2617 return 0; 2618} 2619 2620static inline 2621int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, 2622 struct bpf_map *map, bool exclude_ingress) 2623{ 2624 return 0; 2625} 2626 2627struct sk_buff; 2628 2629static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, 2630 struct sk_buff *skb, 2631 struct bpf_prog 
*xdp_prog) 2632{ 2633 return 0; 2634} 2635 2636static inline 2637int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, 2638 struct bpf_prog *xdp_prog, struct bpf_map *map, 2639 bool exclude_ingress) 2640{ 2641 return 0; 2642} 2643 2644static inline void __cpu_map_flush(void) 2645{ 2646} 2647 2648static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, 2649 struct xdp_frame *xdpf, 2650 struct net_device *dev_rx) 2651{ 2652 return 0; 2653} 2654 2655static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, 2656 struct sk_buff *skb) 2657{ 2658 return -EOPNOTSUPP; 2659} 2660 2661static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, 2662 enum bpf_prog_type type) 2663{ 2664 return ERR_PTR(-EOPNOTSUPP); 2665} 2666 2667static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, 2668 const union bpf_attr *kattr, 2669 union bpf_attr __user *uattr) 2670{ 2671 return -ENOTSUPP; 2672} 2673 2674static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, 2675 const union bpf_attr *kattr, 2676 union bpf_attr __user *uattr) 2677{ 2678 return -ENOTSUPP; 2679} 2680 2681static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, 2682 const union bpf_attr *kattr, 2683 union bpf_attr __user *uattr) 2684{ 2685 return -ENOTSUPP; 2686} 2687 2688static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 2689 const union bpf_attr *kattr, 2690 union bpf_attr __user *uattr) 2691{ 2692 return -ENOTSUPP; 2693} 2694 2695static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, 2696 const union bpf_attr *kattr, 2697 union bpf_attr __user *uattr) 2698{ 2699 return -ENOTSUPP; 2700} 2701 2702static inline void bpf_map_put(struct bpf_map *map) 2703{ 2704} 2705 2706static inline struct bpf_prog *bpf_prog_by_id(u32 id) 2707{ 2708 return ERR_PTR(-ENOTSUPP); 2709} 2710 2711static inline int btf_struct_access(struct bpf_verifier_log *log, 2712 const struct bpf_reg_state *reg, 2713 int off, int size, enum bpf_access_type atype, 2714 u32 *next_btf_id, enum bpf_type_flag *flag, 2715 const char **field_name) 2716{ 2717 return -EACCES; 2718} 2719 2720static inline const struct bpf_func_proto * 2721bpf_base_func_proto(enum bpf_func_id func_id) 2722{ 2723 return NULL; 2724} 2725 2726static inline void bpf_task_storage_free(struct task_struct *task) 2727{ 2728} 2729 2730static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) 2731{ 2732 return false; 2733} 2734 2735static inline const struct btf_func_model * 2736bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 2737 const struct bpf_insn *insn) 2738{ 2739 return NULL; 2740} 2741 2742static inline int 2743bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, 2744 u16 btf_fd_idx, u8 **func_addr) 2745{ 2746 return -ENOTSUPP; 2747} 2748 2749static inline bool unprivileged_ebpf_enabled(void) 2750{ 2751 return false; 2752} 2753 2754static inline bool has_current_bpf_ctx(void) 2755{ 2756 return false; 2757} 2758 2759static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2760{ 2761} 2762 2763static inline void bpf_cgrp_storage_free(struct cgroup *cgroup) 2764{ 2765} 2766 2767static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, 2768 enum bpf_dynptr_type type, u32 offset, u32 size) 2769{ 2770} 2771 2772static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) 2773{ 2774} 2775 2776static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) 2777{ 2778} 2779#endif /* CONFIG_BPF_SYSCALL */ 2780 2781static __always_inline 
int 2782bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) 2783{ 2784 int ret = -EFAULT; 2785 2786 if (IS_ENABLED(CONFIG_BPF_EVENTS)) 2787 ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); 2788 if (unlikely(ret < 0)) 2789 memset(dst, 0, size); 2790 return ret; 2791} 2792 2793void __bpf_free_used_btfs(struct bpf_prog_aux *aux, 2794 struct btf_mod_pair *used_btfs, u32 len); 2795 2796static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 2797 enum bpf_prog_type type) 2798{ 2799 return bpf_prog_get_type_dev(ufd, type, false); 2800} 2801 2802void __bpf_free_used_maps(struct bpf_prog_aux *aux, 2803 struct bpf_map **used_maps, u32 len); 2804 2805bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); 2806 2807int bpf_prog_offload_compile(struct bpf_prog *prog); 2808void bpf_prog_dev_bound_destroy(struct bpf_prog *prog); 2809int bpf_prog_offload_info_fill(struct bpf_prog_info *info, 2810 struct bpf_prog *prog); 2811 2812int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); 2813 2814int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); 2815int bpf_map_offload_update_elem(struct bpf_map *map, 2816 void *key, void *value, u64 flags); 2817int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); 2818int bpf_map_offload_get_next_key(struct bpf_map *map, 2819 void *key, void *next_key); 2820 2821bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); 2822 2823struct bpf_offload_dev * 2824bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); 2825void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); 2826void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); 2827int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, 2828 struct net_device *netdev); 2829void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, 2830 struct net_device *netdev); 2831bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); 2832 2833void unpriv_ebpf_notify(int new_state); 2834 2835#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 2836int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, 2837 struct bpf_prog_aux *prog_aux); 2838void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id); 2839int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr); 2840int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog); 2841void bpf_dev_bound_netdev_unregister(struct net_device *dev); 2842 2843static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) 2844{ 2845 return aux->dev_bound; 2846} 2847 2848static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux) 2849{ 2850 return aux->offload_requested; 2851} 2852 2853bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs); 2854 2855static inline bool bpf_map_is_offloaded(struct bpf_map *map) 2856{ 2857 return unlikely(map->ops == &bpf_map_offload_ops); 2858} 2859 2860struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); 2861void bpf_map_offload_map_free(struct bpf_map *map); 2862u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map); 2863int bpf_prog_test_run_syscall(struct bpf_prog *prog, 2864 const union bpf_attr *kattr, 2865 union bpf_attr __user *uattr); 2866 2867int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); 2868int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); 2869int 
sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); 2870int sock_map_bpf_prog_query(const union bpf_attr *attr, 2871 union bpf_attr __user *uattr); 2872 2873void sock_map_unhash(struct sock *sk); 2874void sock_map_destroy(struct sock *sk); 2875void sock_map_close(struct sock *sk, long timeout); 2876#else 2877static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, 2878 struct bpf_prog_aux *prog_aux) 2879{ 2880 return -EOPNOTSUPP; 2881} 2882 2883static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, 2884 u32 func_id) 2885{ 2886 return NULL; 2887} 2888 2889static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog, 2890 union bpf_attr *attr) 2891{ 2892 return -EOPNOTSUPP; 2893} 2894 2895static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, 2896 struct bpf_prog *old_prog) 2897{ 2898 return -EOPNOTSUPP; 2899} 2900 2901static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev) 2902{ 2903} 2904 2905static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) 2906{ 2907 return false; 2908} 2909 2910static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux) 2911{ 2912 return false; 2913} 2914 2915static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs) 2916{ 2917 return false; 2918} 2919 2920static inline bool bpf_map_is_offloaded(struct bpf_map *map) 2921{ 2922 return false; 2923} 2924 2925static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) 2926{ 2927 return ERR_PTR(-EOPNOTSUPP); 2928} 2929 2930static inline void bpf_map_offload_map_free(struct bpf_map *map) 2931{ 2932} 2933 2934static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map) 2935{ 2936 return 0; 2937} 2938 2939static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, 2940 const union bpf_attr *kattr, 2941 union bpf_attr __user *uattr) 2942{ 2943 return -ENOTSUPP; 2944} 2945 2946#ifdef CONFIG_BPF_SYSCALL 2947static inline int sock_map_get_from_fd(const union bpf_attr *attr, 2948 struct bpf_prog *prog) 2949{ 2950 return -EINVAL; 2951} 2952 2953static inline int sock_map_prog_detach(const union bpf_attr *attr, 2954 enum bpf_prog_type ptype) 2955{ 2956 return -EOPNOTSUPP; 2957} 2958 2959static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, 2960 u64 flags) 2961{ 2962 return -EOPNOTSUPP; 2963} 2964 2965static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, 2966 union bpf_attr __user *uattr) 2967{ 2968 return -EINVAL; 2969} 2970#endif /* CONFIG_BPF_SYSCALL */ 2971#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ 2972 2973static __always_inline void 2974bpf_prog_inc_misses_counters(const struct bpf_prog_array *array) 2975{ 2976 const struct bpf_prog_array_item *item; 2977 struct bpf_prog *prog; 2978 2979 if (unlikely(!array)) 2980 return; 2981 2982 item = &array->items[0]; 2983 while ((prog = READ_ONCE(item->prog))) { 2984 bpf_prog_inc_misses_counter(prog); 2985 item++; 2986 } 2987} 2988 2989#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) 2990void bpf_sk_reuseport_detach(struct sock *sk); 2991int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, 2992 void *value); 2993int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, 2994 void *value, u64 map_flags); 2995#else 2996static inline void bpf_sk_reuseport_detach(struct sock *sk) 2997{ 2998} 2999 3000#ifdef CONFIG_BPF_SYSCALL 3001static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map 
*map, 3002 void *key, void *value) 3003{ 3004 return -EOPNOTSUPP; 3005} 3006 3007static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, 3008 void *key, void *value, 3009 u64 map_flags) 3010{ 3011 return -EOPNOTSUPP; 3012} 3013#endif /* CONFIG_BPF_SYSCALL */ 3014#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ 3015 3016/* verifier prototypes for helper functions called from eBPF programs */ 3017extern const struct bpf_func_proto bpf_map_lookup_elem_proto; 3018extern const struct bpf_func_proto bpf_map_update_elem_proto; 3019extern const struct bpf_func_proto bpf_map_delete_elem_proto; 3020extern const struct bpf_func_proto bpf_map_push_elem_proto; 3021extern const struct bpf_func_proto bpf_map_pop_elem_proto; 3022extern const struct bpf_func_proto bpf_map_peek_elem_proto; 3023extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto; 3024 3025extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 3026extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 3027extern const struct bpf_func_proto bpf_get_numa_node_id_proto; 3028extern const struct bpf_func_proto bpf_tail_call_proto; 3029extern const struct bpf_func_proto bpf_ktime_get_ns_proto; 3030extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; 3031extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto; 3032extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; 3033extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; 3034extern const struct bpf_func_proto bpf_get_current_comm_proto; 3035extern const struct bpf_func_proto bpf_get_stackid_proto; 3036extern const struct bpf_func_proto bpf_get_stack_proto; 3037extern const struct bpf_func_proto bpf_get_task_stack_proto; 3038extern const struct bpf_func_proto bpf_get_stackid_proto_pe; 3039extern const struct bpf_func_proto bpf_get_stack_proto_pe; 3040extern const struct bpf_func_proto bpf_sock_map_update_proto; 3041extern const struct bpf_func_proto bpf_sock_hash_update_proto; 3042extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; 3043extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; 3044extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto; 3045extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; 3046extern const struct bpf_func_proto bpf_msg_redirect_map_proto; 3047extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; 3048extern const struct bpf_func_proto bpf_sk_redirect_map_proto; 3049extern const struct bpf_func_proto bpf_spin_lock_proto; 3050extern const struct bpf_func_proto bpf_spin_unlock_proto; 3051extern const struct bpf_func_proto bpf_get_local_storage_proto; 3052extern const struct bpf_func_proto bpf_strtol_proto; 3053extern const struct bpf_func_proto bpf_strtoul_proto; 3054extern const struct bpf_func_proto bpf_tcp_sock_proto; 3055extern const struct bpf_func_proto bpf_jiffies64_proto; 3056extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; 3057extern const struct bpf_func_proto bpf_event_output_data_proto; 3058extern const struct bpf_func_proto bpf_ringbuf_output_proto; 3059extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; 3060extern const struct bpf_func_proto bpf_ringbuf_submit_proto; 3061extern const struct bpf_func_proto bpf_ringbuf_discard_proto; 3062extern const struct bpf_func_proto bpf_ringbuf_query_proto; 3063extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto; 3064extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto; 
extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
extern const struct bpf_func_proto bpf_set_retval_proto;
extern const struct bpf_func_proto bpf_get_retval_proto;
extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF.
*/ 3104void bpf_user_rnd_init_once(void); 3105u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 3106u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 3107 3108#if defined(CONFIG_NET) 3109bool bpf_sock_common_is_valid_access(int off, int size, 3110 enum bpf_access_type type, 3111 struct bpf_insn_access_aux *info); 3112bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, 3113 struct bpf_insn_access_aux *info); 3114u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 3115 const struct bpf_insn *si, 3116 struct bpf_insn *insn_buf, 3117 struct bpf_prog *prog, 3118 u32 *target_size); 3119int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, 3120 struct bpf_dynptr_kern *ptr); 3121#else 3122static inline bool bpf_sock_common_is_valid_access(int off, int size, 3123 enum bpf_access_type type, 3124 struct bpf_insn_access_aux *info) 3125{ 3126 return false; 3127} 3128static inline bool bpf_sock_is_valid_access(int off, int size, 3129 enum bpf_access_type type, 3130 struct bpf_insn_access_aux *info) 3131{ 3132 return false; 3133} 3134static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 3135 const struct bpf_insn *si, 3136 struct bpf_insn *insn_buf, 3137 struct bpf_prog *prog, 3138 u32 *target_size) 3139{ 3140 return 0; 3141} 3142static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, 3143 struct bpf_dynptr_kern *ptr) 3144{ 3145 return -EOPNOTSUPP; 3146} 3147#endif 3148 3149#ifdef CONFIG_INET 3150struct sk_reuseport_kern { 3151 struct sk_buff *skb; 3152 struct sock *sk; 3153 struct sock *selected_sk; 3154 struct sock *migrating_sk; 3155 void *data_end; 3156 u32 hash; 3157 u32 reuseport_id; 3158 bool bind_inany; 3159}; 3160bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 3161 struct bpf_insn_access_aux *info); 3162 3163u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 3164 const struct bpf_insn *si, 3165 struct bpf_insn *insn_buf, 3166 struct bpf_prog *prog, 3167 u32 *target_size); 3168 3169bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 3170 struct bpf_insn_access_aux *info); 3171 3172u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 3173 const struct bpf_insn *si, 3174 struct bpf_insn *insn_buf, 3175 struct bpf_prog *prog, 3176 u32 *target_size); 3177#else 3178static inline bool bpf_tcp_sock_is_valid_access(int off, int size, 3179 enum bpf_access_type type, 3180 struct bpf_insn_access_aux *info) 3181{ 3182 return false; 3183} 3184 3185static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 3186 const struct bpf_insn *si, 3187 struct bpf_insn *insn_buf, 3188 struct bpf_prog *prog, 3189 u32 *target_size) 3190{ 3191 return 0; 3192} 3193static inline bool bpf_xdp_sock_is_valid_access(int off, int size, 3194 enum bpf_access_type type, 3195 struct bpf_insn_access_aux *info) 3196{ 3197 return false; 3198} 3199 3200static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 3201 const struct bpf_insn *si, 3202 struct bpf_insn *insn_buf, 3203 struct bpf_prog *prog, 3204 u32 *target_size) 3205{ 3206 return 0; 3207} 3208#endif /* CONFIG_INET */ 3209 3210enum bpf_text_poke_type { 3211 BPF_MOD_CALL, 3212 BPF_MOD_JUMP, 3213}; 3214 3215int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 3216 void *addr1, void *addr2); 3217 3218void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, 3219 struct bpf_prog *new, struct bpf_prog *old); 3220 3221void *bpf_arch_text_copy(void 
*dst, void *src, size_t len);
int bpf_arch_text_invalidate(void *dst, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

#define MAX_BPRINTF_VARARGS		12
#define MAX_BPRINTF_BUF			1024

struct bpf_bprintf_data {
	u32 *bin_args;
	char *buf;
	bool get_bin_args;
	bool get_buf;
};

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data);
void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);

#ifdef CONFIG_BPF_LSM
void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
void bpf_cgroup_atype_put(int cgroup_atype);
#else
static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
#endif /* CONFIG_BPF_LSM */

struct key;

#ifdef CONFIG_KEYS
struct bpf_key {
	struct key *key;
	bool has_ref;
};
#endif /* CONFIG_KEYS */

static inline bool type_is_alloc(u32 type)
{
	return type & MEM_ALLOC;
}

static inline gfp_t bpf_memcg_flags(gfp_t flags)
{
	if (memcg_bpf_enabled())
		return flags | __GFP_ACCOUNT;
	return flags;
}

static inline bool bpf_is_subprog(const struct bpf_prog *prog)
{
	return prog->aux->func_idx != 0;
}

#endif /* _LINUX_BPF_H */
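/* Illustrative sketch (not something defined in this header): the intended
 * prepare/cleanup pairing of bpf_bprintf_prepare() and bpf_bprintf_cleanup()
 * declared above, loosely modelled on how the printk-style BPF helpers
 * consume a format string. "fmt", "fmt_size", "args" and "num_args" are
 * assumed to come from the hypothetical caller (num_args must not exceed
 * MAX_BPRINTF_VARARGS), "buf" is assumed to be a local char array, and
 * bstr_printf() is the binary-argument printf from lib/vsprintf.c.
 *
 *	struct bpf_bprintf_data data = {
 *		.get_bin_args = true,
 *	};
 *	int ret;
 *
 *	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = bstr_printf(buf, sizeof(buf), fmt, data.bin_args);
 *	bpf_bprintf_cleanup(&data);
 *	return ret;
 */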