/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map. It is a runtime check to ensure
	 * an inner map can be inserted to an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification time. When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the inserted map has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
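/* Example implementation (editor's sketch after kernel/bpf/arraymap.c; the
 * exact member list there is worth double-checking against the source). A
 * map type fills in only the ops it supports, e.g. for plain arrays:
 *
 *	const struct bpf_map_ops array_map_ops = {
 *		.map_meta_equal		= bpf_map_meta_equal,
 *		.map_alloc_check	= array_map_alloc_check,
 *		.map_alloc		= array_map_alloc,
 *		.map_free		= array_map_free,
 *		.map_get_next_key	= array_map_get_next_key,
 *		.map_lookup_elem	= array_map_lookup_elem,
 *		.map_update_elem	= array_map_update_elem,
 *		.map_delete_elem	= array_map_delete_elem,
 *	};
 *
 * The syscall layer dispatches through map->ops; unset callbacks mean the
 * corresponding command fails for that map type.
 */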
struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
			(struct bpf_spin_lock){};
	if (unlikely(map_value_has_timer(map)))
		*(struct bpf_timer *)(dst + map->timer_off) =
			(struct bpf_timer){};
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;

	if (unlikely(map_value_has_spin_lock(map))) {
		s_off = map->spin_lock_off;
		s_sz = sizeof(struct bpf_spin_lock);
	} else if (unlikely(map_value_has_timer(map))) {
		t_off = map->timer_off;
		t_sz = sizeof(struct bpf_timer);
	}

	if (unlikely(s_sz || t_sz)) {
		if (s_off < t_off || !s_sz) {
			swap(s_off, t_off);
			swap(s_sz, t_sz);
		}
		memcpy(dst, src, t_off);
		memcpy(dst + t_off + t_sz,
		       src + t_off + t_sz,
		       s_off - t_off - t_sz);
		memcpy(dst + s_off + s_sz,
		       src + s_off + s_sz,
		       map->value_size - s_off - s_sz);
	} else {
		memcpy(dst, src, map->value_size);
	}
}
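/* A worked sketch of copy_map_value() (editor's illustration; the offsets
 * are hypothetical, the verifier computes the real ones from BTF). For a
 * map with value_size == 64 and only a bpf_timer at timer_off == 40 (16
 * bytes in the uapi), the swap above moves the hole into the s_* pair and
 * the three memcpy() calls copy
 *	[0, 0), [0, 40) and [56, 64)
 * leaving the timer in dst untouched. With neither special field present,
 * the whole value is copied with a single memcpy().
 */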
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_SOCKET_OR_NULL,	/* pointer to bpf_sock (fullsock) or NULL */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_PTR_TO_ALLOC_MEM_OR_NULL,	/* pointer to dynamically allocated memory or NULL */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK_OR_NULL,	/* pointer to stack or NULL */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	__BPF_ARG_TYPE_MAX,
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
	RET_PTR_TO_ALLOC_MEM_OR_NULL,	/* returns a pointer to dynamically allocated memory or NULL */
	RET_PTR_TO_BTF_ID_OR_NULL,	/* returns a pointer to a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
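/* Example definition (editor's sketch after kernel/bpf/helpers.c; field
 * values are from memory and worth double-checking against the source):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 *
 * The verifier checks each BPF_CALL's registers against the proto's
 * arg/ret types and later patches the call's imm32 to reach the helper.
 */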
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		/* nothing was written into register */
	SCALAR_VALUE,		/* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		/* reg points to bpf_context */
	CONST_PTR_TO_MAP,	/* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	/* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL, /* points to map elem value or NULL */
	PTR_TO_STACK,		/* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	/* skb->data - meta_len */
	PTR_TO_PACKET,		/* reg points to skb->data */
	PTR_TO_PACKET_END,	/* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	/* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		/* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	/* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	/* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	/* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	/* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	/* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_BTF_ID_OR_NULL,
	PTR_TO_MEM,		/* reg points to valid memory region */
	PTR_TO_MEM_OR_NULL,	/* reg points to valid memory region or NULL */
	PTR_TO_RDONLY_BUF,	/* reg points to a readonly buffer */
	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
	PTR_TO_RDWR_BUF,	/* reg points to a read/write buffer */
	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
	PTR_TO_PERCPU_BTF_ID,	/* reg points to a percpu kernel variable */
	PTR_TO_FUNC,		/* reg points to a bpf program function */
	PTR_TO_MAP_KEY,		/* reg points to a map element key */
	__BPF_REG_TYPE_MAX,
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
	bool (*check_kfunc_call)(u32 kfunc_btf_id);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 38

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these functions are called from the generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
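/* Sketch of what a generated trampoline does for use case 2 above
 * (editor's illustration; the real layout is arch-specific, see
 * arch_prepare_bpf_trampoline() in arch/x86/net/bpf_jit_comp.c):
 *
 *	save function args to the stack
 *	for each fentry prog:
 *		start = __bpf_prog_enter(prog);
 *		prog->bpf_func(args);
 *		__bpf_prog_exit(prog, start);
 *	restore args and call orig_call	(BPF_TRAMP_F_CALL_ORIG)
 *	for each fexit prog: same enter/run/exit bracketing
 *	return to the original caller	(BPF_TRAMP_F_SKIP_FRAME)
 *
 * __bpf_prog_enter/__bpf_prog_exit handle recursion protection and
 * run-time stats, which is why every prog invocation is bracketed.
 */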
struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name = #_name,					\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
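/* Example wiring (editor's note, from memory of net/core/filter.c): the XDP
 * fast path declares DEFINE_BPF_DISPATCHER(xdp) and, when the installed
 * program changes, calls
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
 * which re-JITs the dispatcher image so the common case becomes a direct
 * call rather than a retpoline-afflicted indirect call.
 */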
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
	} owner;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif
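/* Example consumer (editor's sketch; see net/ipv4/bpf_tcp_ca.c for the real
 * definition, the callback names below are from memory): TCP congestion
 * control is exposed to BPF_PROG_TYPE_STRUCT_OPS roughly as
 *
 *	struct bpf_struct_ops bpf_tcp_congestion_ops = {
 *		.verifier_ops	= &bpf_tcp_ca_verifier_ops,
 *		.reg		= bpf_tcp_ca_reg,
 *		.unreg		= bpf_tcp_ca_unreg,
 *		.name		= "tcp_congestion_ops",
 *	};
 *
 * check_member()/init_member() validate and copy each btf_member from the
 * user-supplied map value, and reg()/unreg() hand the assembled kdata to
 * the subsystem (here, tcp_register_congestion_control()).
 */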
struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
#endif
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
	current->bpf_ctx = old_ctx;
#endif
}

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN				(1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
			    const void *ctx, bpf_prog_run_fn run_prog,
			    u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;
	u32 func_ret;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		ret &= (func_ret & 1);
		*(ret_flags) |= (func_ret >> 1);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
		      const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
		   const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	if (unlikely(!array))
		goto out;
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
out:
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({								\
		u32 _flags = 0;						\
		bool _cn;						\
		u32 _ret;						\
		_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
		_cn = _flags & BPF_RET_SET_CN;				\
		if (_ret)						\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else							\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;							\
	})

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
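/* Typical use (editor's sketch of the pattern in kernel/bpf/syscall.c):
 * bracket a map operation that could also be reached from a tracing
 * program, so a re-entrant prog sees bpf_prog_active != 0 and bails out
 * instead of deadlocking on the bucket lock:
 *
 *	bpf_disable_instrumentation();
 *	rcu_read_lock();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	rcu_read_unlock();
 *	bpf_enable_instrumentation();
 */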
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
		     int node)
{
	return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
		     gfp_t flags)
{
	return __alloc_percpu_gfp(size, align, flags);
}
#endif

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }

struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
			     const struct bpf_prog *prog);

enum bpf_iter_feature {
	BPF_ITER_RESCHED	= BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	bpf_iter_get_func_proto_t get_func_proto;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};
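/* Example registration (editor's sketch modeled on kernel/bpf/map_iter.c;
 * exact field values are from memory):
 *
 *	static const struct bpf_iter_reg bpf_map_reg_info = {
 *		.target			= "bpf_map",
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__bpf_map, map),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &bpf_map_seq_info,
 *	};
 *	...
 *	bpf_iter_reg_target(&bpf_map_reg_info);
 *
 * ctx_arg_info tells the verifier which iterator ctx fields may be NULL
 * (they are NULL when the seq_file stop callback runs the prog).
 */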
struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(void *, value);
};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
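/* Usage sketch (editor's note): the per-cpu copy helpers declared above use
 * this to snapshot per-cpu counters into the user-visible buffer, roughly
 * as in bpf_percpu_array_copy():
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 *
 * Readers may observe stale values, but never a torn long-sized word.
 */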

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_buff;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
                          struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
                             struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                           struct bpf_prog *xdp_prog, struct bpf_map *map,
                           bool exclude_ingress);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
                             struct sk_buff *skb);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
        return (attr->map_flags & BPF_F_NUMA_NODE) ?
                attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
                             const union bpf_attr *kattr,
                             union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
                                const union bpf_attr *kattr,
                                union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
                    const struct bpf_prog *prog,
                    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
                      const struct btf_type *t, int off, int size,
                      enum bpf_access_type atype,
                      u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
                          const struct btf *btf, u32 id, int off,
                          const struct btf *need_btf, u32 need_type_id);

int btf_distill_func_proto(struct bpf_verifier_log *log,
                           struct btf *btf,
                           const struct btf_type *func_proto,
                           const char *func_name,
                           struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
                                struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
                              const struct btf *btf, u32 func_id,
                              struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
                          struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
                         struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
void bpf_task_storage_free(struct task_struct *task);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
                         const struct bpf_insn *insn);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
                                                     enum bpf_prog_type type,
                                                     bool attach_drv)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
                                 const struct bpf_link_ops *ops,
                                 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
                                 struct bpf_link_primer *primer)
{
        return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
        return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}
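
/*
 * Illustrative sketch only (not part of this header): the usual attach-path
 * pattern built on bpf_link_init/prime/settle/cleanup (stubbed above for the
 * !CONFIG_BPF_SYSCALL case).  'my_link', 'my_link_ops' and
 * 'do_target_specific_attach' are hypothetical.
 *
 *      struct bpf_link_primer primer;
 *      int err;
 *
 *      bpf_link_init(&my_link->link, BPF_LINK_TYPE_UNSPEC, &my_link_ops, prog);
 *      err = bpf_link_prime(&my_link->link, &primer);
 *      if (err) {
 *              kfree(my_link);            (not yet primed: free it directly)
 *              return err;
 *      }
 *      err = do_target_specific_attach(my_link);
 *      if (err) {
 *              bpf_link_cleanup(&primer); (unwinds prime and frees the link)
 *              return err;
 *      }
 *      return bpf_link_settle(&primer);   (installs and returns the link fd)
 */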

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
        return -EOPNOTSUPP;
}

static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
        return false;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
                          struct bpf_map *map, bool exclude_ingress)
{
        return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
                                           struct sk_buff *skb,
                                           struct bpf_prog *xdp_prog)
{
        return 0;
}

static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                           struct bpf_prog *xdp_prog, struct bpf_map *map,
                           bool exclude_ingress)
{
        return 0;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
                                  struct xdp_buff *xdp,
                                  struct net_device *dev_rx)
{
        return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
                                           struct sk_buff *skb)
{
        return -EOPNOTSUPP;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
        return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
                                                      enum bpf_prog_type type)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
                                        const union bpf_attr *kattr,
                                        union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
                                        const union bpf_attr *kattr,
                                        union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                                            const union bpf_attr *kattr,
                                            union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                                   const union bpf_attr *kattr,
                                                   union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
                                              const union bpf_attr *kattr,
                                              union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
{
        return false;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
        return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
        return NULL;
}

static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
        return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
                         const struct bpf_insn *insn)
{
        return NULL;
}
#endif /* CONFIG_BPF_SYSCALL */

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
                          struct btf_mod_pair *used_btfs, u32 len);

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
                                                 enum bpf_prog_type type)
{
        return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
                          struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
                               struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
                                void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
                                 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
                                    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
                                       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
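
/*
 * Illustrative sketch only (not part of this header): how an offload-capable
 * driver typically wires up the bpf_offload_dev API above.  'my_offload_ops'
 * and 'priv' are hypothetical.
 *
 *      struct bpf_offload_dev *offdev;
 *      int err;
 *
 *      offdev = bpf_offload_dev_create(&my_offload_ops, priv);
 *      if (IS_ERR(offdev))
 *              return PTR_ERR(offdev);
 *      err = bpf_offload_dev_netdev_register(offdev, netdev);
 *      if (err)
 *              bpf_offload_dev_destroy(offdev);
 *
 * and on teardown:
 *
 *      bpf_offload_dev_netdev_unregister(offdev, netdev);
 *      bpf_offload_dev_destroy(offdev);
 */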

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
        return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
        return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
                                        union bpf_attr *attr)
{
        return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
        return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
        return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                                            const union bpf_attr *kattr,
                                            union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
                                       struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
                                       enum bpf_prog_type ptype)
{
        return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
                                           u64 flags)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
                                       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
                                       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
                                                     void *key, void *value)
{
        return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
                                                     void *key, void *value,
                                                     u64 map_flags)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs
 * (an illustrative bpf_func_proto definition follows this list)
 */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
  enum bpf_func_id func_id, const struct bpf_prog *prog);
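
/*
 * Illustrative sketch only: roughly what one of the proto objects declared
 * above looks like at its definition site.  The helper name and the exact
 * field values here are hypothetical; the real definitions live in
 * kernel/bpf/ and net/core/filter.c.
 *
 *      BPF_CALL_2(bpf_foo, struct bpf_map *, map, void *, key)
 *      {
 *              ...
 *      }
 *
 *      const struct bpf_func_proto bpf_foo_proto = {
 *              .func           = bpf_foo,
 *              .gpl_only       = false,
 *              .ret_type       = RET_INTEGER,
 *              .arg1_type      = ARG_CONST_MAP_PTR,
 *              .arg2_type      = ARG_PTR_TO_MAP_KEY,
 *      };
 */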

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
                                     enum bpf_access_type type,
                                     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
                              struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                                const struct bpf_insn *si,
                                struct bpf_insn *insn_buf,
                                struct bpf_prog *prog,
                                u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
                                                   enum bpf_access_type type,
                                                   struct bpf_insn_access_aux *info)
{
        return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
                                            enum bpf_access_type type,
                                            struct bpf_insn_access_aux *info)
{
        return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                                              const struct bpf_insn *si,
                                              struct bpf_insn *insn_buf,
                                              struct bpf_prog *prog,
                                              u32 *target_size)
{
        return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
        struct sk_buff *skb;
        struct sock *sk;
        struct sock *selected_sk;
        struct sock *migrating_sk;
        void *data_end;
        u32 hash;
        u32 reuseport_id;
        bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
                                  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
                                    const struct bpf_insn *si,
                                    struct bpf_insn *insn_buf,
                                    struct bpf_prog *prog,
                                    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
                                  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
                                    const struct bpf_insn *si,
                                    struct bpf_insn *insn_buf,
                                    struct bpf_prog *prog,
                                    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
                                                enum bpf_access_type type,
                                                struct bpf_insn_access_aux *info)
{
        return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
                                                  const struct bpf_insn *si,
                                                  struct bpf_insn *insn_buf,
                                                  struct bpf_prog *prog,
                                                  u32 *target_size)
{
        return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
                                                enum bpf_access_type type,
                                                struct bpf_insn_access_aux *info)
{
        return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
                                                  const struct bpf_insn *si,
                                                  struct bpf_insn *insn_buf,
                                                  struct bpf_prog *prog,
                                                  u32 *target_size)
{
        return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
        BPF_MOD_CALL,
        BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *addr1, void *addr2);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
                        u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);

#endif /* _LINUX_BPF_H */