/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key, struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map, u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map, u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp, struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers.*/
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted to an outer map.
	 *
	 * Some properties of the inner map has been used during the
	 * verification time.  When inserting an inner map at the runtime,
	 * map_meta_equal has to ensure the inserting map has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0, const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags);

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
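/* Illustrative sketch (not part of this header): a map implementation fills in
 * one of these tables and the syscall/verifier layers call through it. The
 * example_* callback names below are hypothetical stand-ins for what a simple
 * array-like map might provide; real tables such as array_map_ops in
 * kernel/bpf/arraymap.c wire up many more of the hooks declared above:
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_meta_equal   = bpf_map_meta_equal,
 *		.map_alloc_check  = example_map_alloc_check,
 *		.map_alloc        = example_map_alloc,
 *		.map_free         = example_map_free,
 *		.map_get_next_key = example_map_get_next_key,
 *		.map_lookup_elem  = example_map_lookup_elem,
 *		.map_update_elem  = example_map_update_elem,
 *		.map_delete_elem  = example_map_delete_elem,
 *	};
 */
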
struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) = (struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
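/* Worked example (illustrative, not part of this header): for a map value
 * declared in a BPF program as
 *
 *	struct val {
 *		u32 a;			// bytes [0, 4)
 *		struct bpf_spin_lock l;	// bytes [4, 8), spin_lock_off == 4
 *		u64 b;			// bytes [8, 16)
 *	};
 *
 * with value_size == 16, copy_map_value() above copies bytes [0, 4) and
 * [8, 16) and leaves the lock word in dst untouched; check_and_init_map_lock()
 * is what zero-initializes that word when the value is first set up.
 */
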
struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map, void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map, void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
	       map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0, const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0, /* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
				* helper function must fill all bytes or clear
				* them in error case.
				*/

	ARG_CONST_SIZE, /* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX, /* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */
	ARG_ANYTHING, /* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
	ARG_PTR_TO_INT, /* pointer to int */
	ARG_PTR_TO_LONG, /* pointer to long */
	ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */
	ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
	ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
	ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
	ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */
	ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
	__BPF_ARG_TYPE_MAX,
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER, /* function returns integer */
	RET_VOID, /* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
	RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */
	RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
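/* Illustrative sketch (not part of this header): a helper exposes itself to the
 * verifier through one of these descriptors. Modelled on how kernel/bpf/helpers.c
 * describes bpf_map_lookup_elem(); the field values here are a sketch and may
 * not match the real definition exactly:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 *
 * get_func_proto() in struct bpf_verifier_ops (below) returns such a descriptor
 * for a given BPF_FUNC_* id so the verifier can type-check each BPF_CALL.
 */
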
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0, /* nothing was written into register */
	SCALAR_VALUE, /* reg doesn't contain a valid pointer */
	PTR_TO_CTX, /* reg points to bpf_context */
	CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE, /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL, /* points to map elem value or NULL */
	PTR_TO_STACK, /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META, /* skb->data - meta_len */
	PTR_TO_PACKET, /* reg points to skb->data */
	PTR_TO_PACKET_END, /* skb->data + headlen */
	PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET, /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON, /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_BTF_ID_OR_NULL,
	PTR_TO_MEM, /* reg points to valid memory region */
	PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */
	PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */
	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
	PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */
	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
	PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */
	PTR_TO_FUNC, /* reg points to a bpf program function */
	PTR_TO_MAP_KEY, /* reg points to a map element key */
	__BPF_REG_TYPE_MAX,
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id, const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig, struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
	bool (*check_kfunc_call)(u32 kfunc_btf_id);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off, struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 38

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of program to run before calling original function
 *    fexit = a set of program to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of program to run before calling original function
 *      fexit = a set of program to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *, const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
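/* Illustrative sketch (not part of this header, simplified): attaching an
 * fentry/fexit program resolves its target into a trampoline roughly like
 *
 *	struct bpf_attach_target_info tgt_info = {};
 *	struct bpf_trampoline *tr;
 *
 *	// key identifies the (BTF object, attach_btf_id) target
 *	tr = bpf_trampoline_get(key, &tgt_info);
 *	if (!tr)
 *		return -ENOMEM;
 *	err = bpf_trampoline_link_prog(prog, tr);	// add prog to the fentry/fexit
 *							// list and regenerate the image
 *
 * and detaching uses bpf_trampoline_unlink_prog() plus bpf_trampoline_put().
 */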
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to);
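/* Illustrative sketch (not part of this header): the XDP dispatcher in
 * net/core/filter.c is built with these macros, roughly:
 *
 *	DEFINE_BPF_DISPATCHER(xdp)
 *
 *	void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
 *	{
 *		bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
 *	}
 *
 * XDP program invocation then goes through BPF_DISPATCHER_FUNC(xdp), which the
 * dispatcher image rewrites into direct calls as programs are added/removed.
 */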
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;
struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions has been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};
struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program which FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link, struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t, const struct btf_member *member);
	int (*init_member)(const struct btf_type *t, const struct btf_member *member, void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
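/* Worked example (illustrative): a map created with BPF_F_RDONLY_PROG in its
 * map_flags is read-only from the program side, so bpf_map_flags_to_cap()
 * returns just BPF_MAP_CAN_READ and the verifier can reject stores to its
 * values; with neither BPF_F_RDONLY_PROG nor BPF_F_WRONLY_PROG set, both
 * BPF_MAP_CAN_READ and BPF_MAP_CAN_WRITE are granted.
 * bpf_map_flags_access_ok() only rejects the contradictory case of both
 * flags being requested at map creation time.
 */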
struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, __u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN				(1 << 0)

/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY,
 * if bpf_cgroup_storage_set() failed, the rest of programs
 * will not execute. This should be a really rare scenario
 * as it requires BPF_CGROUP_STORAGE_NEST_MAX number of
 * preemptions all between bpf_cgroup_storage_set() and
 * bpf_cgroup_storage_unset() on the same cpu.
 */
#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags)		\
	({								\
		struct bpf_prog_array_item *_item;			\
		struct bpf_prog *_prog;					\
		struct bpf_prog_array *_array;				\
		u32 _ret = 1;						\
		u32 func_ret;						\
		migrate_disable();					\
		rcu_read_lock();					\
		_array = rcu_dereference(array);			\
		_item = &_array->items[0];				\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))	\
				break;					\
			func_ret = func(_prog, ctx);			\
			_ret &= (func_ret & 1);				\
			*(ret_flags) |= (func_ret >> 1);		\
			bpf_cgroup_storage_unset();			\
			_item++;					\
		}							\
		rcu_read_unlock();					\
		migrate_enable();					\
		_ret;							\
	})

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage)	\
	({								\
		struct bpf_prog_array_item *_item;			\
		struct bpf_prog *_prog;					\
		struct bpf_prog_array *_array;				\
		u32 _ret = 1;						\
		migrate_disable();					\
		rcu_read_lock();					\
		_array = rcu_dereference(array);			\
		if (unlikely(check_non_null && !_array))		\
			goto _out;					\
		_item = &_array->items[0];				\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			if (!set_cg_storage) {				\
				_ret &= func(_prog, ctx);		\
			} else {					\
				if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))	\
					break;				\
				_ret &= func(_prog, ctx);		\
				bpf_cgroup_storage_unset();		\
			}						\
			_item++;					\
		}							\
_out:									\
		rcu_read_unlock();					\
		migrate_enable();					\
		_ret;							\
	})

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet. This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({								\
		u32 _flags = 0;						\
		bool _cn;						\
		u32 _ret;						\
		_ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \
		_cn = _flags & BPF_RET_SET_CN;				\
		if (_ret)						\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else							\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;							\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, gfp_t flags)
{
	return __alloc_percpu_gfp(size, align, flags);
}
#endif
extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }

struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);

enum bpf_iter_feature {
	BPF_ITER_RESCHED = BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};
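/* Illustrative sketch (not part of this header): an iterator target registers
 * itself with a bpf_iter_reg via bpf_iter_reg_target() (declared below). The
 * "foo" names are hypothetical stand-ins, loosely modelled on
 * kernel/bpf/task_iter.c:
 *
 *	DEFINE_BPF_ITER_FUNC(foo, struct bpf_iter_meta *meta, struct foo *foo)
 *
 *	static const struct bpf_iter_seq_info foo_seq_info = {
 *		.seq_ops		= &foo_seq_ops,
 *		.init_seq_private	= foo_seq_init,
 *		.fini_seq_private	= foo_seq_fini,
 *		.seq_priv_size		= sizeof(struct foo_iter_priv),
 *	};
 *
 *	static struct bpf_iter_reg foo_reg_info = {
 *		.target			= "foo",
 *		.feature		= BPF_ITER_RESCHED,
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__foo, foo),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &foo_seq_info,
 *	};
 *
 *	ret = bpf_iter_reg_target(&foo_reg_info);
 */
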
struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(void *, value);
};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_buff;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map, bool exclude_ingress);
bool dev_map_can_have_prog(struct bpf_map *map);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx);
bool cpu_map_prog_allowed(struct bpf_map *map);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ? attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog, struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype, u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *btf, u32 id, int off,
			  const struct btf *need_btf, u32 need_type_id);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);
*regs); 1566int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, 1567 const struct btf *btf, u32 func_id, 1568 struct bpf_reg_state *regs); 1569int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, 1570 struct bpf_reg_state *reg); 1571int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, 1572 struct btf *btf, const struct btf_type *t); 1573 1574struct bpf_prog *bpf_prog_by_id(u32 id); 1575struct bpf_link *bpf_link_by_id(u32 id); 1576 1577const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); 1578void bpf_task_storage_free(struct task_struct *task); 1579bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); 1580const struct btf_func_model * 1581bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 1582 const struct bpf_insn *insn); 1583#else /* !CONFIG_BPF_SYSCALL */ 1584static inline struct bpf_prog *bpf_prog_get(u32 ufd) 1585{ 1586 return ERR_PTR(-EOPNOTSUPP); 1587} 1588 1589static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, 1590 enum bpf_prog_type type, 1591 bool attach_drv) 1592{ 1593 return ERR_PTR(-EOPNOTSUPP); 1594} 1595 1596static inline void bpf_prog_add(struct bpf_prog *prog, int i) 1597{ 1598} 1599 1600static inline void bpf_prog_sub(struct bpf_prog *prog, int i) 1601{ 1602} 1603 1604static inline void bpf_prog_put(struct bpf_prog *prog) 1605{ 1606} 1607 1608static inline void bpf_prog_inc(struct bpf_prog *prog) 1609{ 1610} 1611 1612static inline struct bpf_prog *__must_check 1613bpf_prog_inc_not_zero(struct bpf_prog *prog) 1614{ 1615 return ERR_PTR(-EOPNOTSUPP); 1616} 1617 1618static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 1619 const struct bpf_link_ops *ops, 1620 struct bpf_prog *prog) 1621{ 1622} 1623 1624static inline int bpf_link_prime(struct bpf_link *link, 1625 struct bpf_link_primer *primer) 1626{ 1627 return -EOPNOTSUPP; 1628} 1629 1630static inline int bpf_link_settle(struct bpf_link_primer *primer) 1631{ 1632 return -EOPNOTSUPP; 1633} 1634 1635static inline void bpf_link_cleanup(struct bpf_link_primer *primer) 1636{ 1637} 1638 1639static inline void bpf_link_inc(struct bpf_link *link) 1640{ 1641} 1642 1643static inline void bpf_link_put(struct bpf_link *link) 1644{ 1645} 1646 1647static inline int bpf_obj_get_user(const char __user *pathname, int flags) 1648{ 1649 return -EOPNOTSUPP; 1650} 1651 1652static inline bool dev_map_can_have_prog(struct bpf_map *map) 1653{ 1654 return false; 1655} 1656 1657static inline void __dev_flush(void) 1658{ 1659} 1660 1661struct xdp_buff; 1662struct bpf_dtab_netdev; 1663struct bpf_cpu_map_entry; 1664 1665static inline 1666int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, 1667 struct net_device *dev_rx) 1668{ 1669 return 0; 1670} 1671 1672static inline 1673int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, 1674 struct net_device *dev_rx) 1675{ 1676 return 0; 1677} 1678 1679static inline 1680int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, 1681 struct bpf_map *map, bool exclude_ingress) 1682{ 1683 return 0; 1684} 1685 1686struct sk_buff; 1687 1688static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, 1689 struct sk_buff *skb, 1690 struct bpf_prog *xdp_prog) 1691{ 1692 return 0; 1693} 1694 1695static inline 1696int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, 1697 struct bpf_prog *xdp_prog, struct bpf_map *map, 1698 bool exclude_ingress) 1699{ 1700 return 0; 1701} 1702 1703static inline void __cpu_map_flush(void) 1704{ 
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
					      const union bpf_attr *kattr,
					      union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
{
	return false;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}

static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
	return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{
	return NULL;
}
#endif /* CONFIG_BPF_SYSCALL */

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len);

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);

void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;

const struct bpf_func_proto *bpf_tracing_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	struct sock *migrating_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);

#endif /* _LINUX_BPF_H */