/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>

struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
};
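
/* A map type implementation fills in one of these ops tables and is wired
 * up through bpf_types.h. Hypothetical sketch (the example_* names are
 * illustrative, not an in-tree map type):
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_alloc_check	= example_map_alloc_check,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *		.map_update_elem	= example_map_update_elem,
 *		.map_delete_elem	= example_map_delete_elem,
 *	};
 */
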
struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	bool unpriv_array;
	bool frozen; /* write-once */
	/* 48 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic_t refcnt ____cacheline_aligned;
	atomic_t usercnt;
	struct work_struct work;
	char name[BPF_OBJ_NAME_LEN];
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
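
/* Sketch of the intended usage of the two helpers above, assuming a map
 * whose value embeds a struct bpf_spin_lock: a freshly allocated value
 * gets its lock field zero-initialized first, then the data is copied
 * around the lock (this mirrors what map update paths do):
 *
 *	check_and_init_map_lock(map, new_val);
 *	copy_map_value(map, new_val, src_val);
 */
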
struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return map->btf && map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints are used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints are used to prototype bpf_memcmp() and
	 * other functions that access data on the eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	enum bpf_arg_type arg1_type;
	enum bpf_arg_type arg2_type;
	enum bpf_arg_type arg3_type;
	enum bpf_arg_type arg4_type;
	enum bpf_arg_type arg5_type;
};
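
/* For example, the proto for the map lookup helper pairs a const map
 * pointer with a stack-resident key and returns a value pointer that may
 * be NULL (sketch of the definition that lives in kernel/bpf/helpers.c):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */
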
/* bpf_context is an intentionally undefined structure. Pointer to bpf_context
 * is the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		/* nothing was written into register */
	SCALAR_VALUE,		/* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		/* reg points to bpf_context */
	CONST_PTR_TO_MAP,	/* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	/* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		/* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	/* skb->data - meta_len */
	PTR_TO_PACKET,		/* reg points to skb->data */
	PTR_TO_PACKET_END,	/* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	/* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		/* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	/* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	/* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	/* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	/* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	/* reg points to struct xdp_sock */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	int ctx_field_size;
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
};
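
/* The run path updates these per-cpu stats under the seqcount-style
 * u64_stats_sync, roughly as follows (sketch of the pattern the
 * BPF_PROG_RUN wrapper uses when stats are enabled; 'start' is a
 * sched_clock() timestamp taken before the program ran):
 *
 *	struct bpf_prog_stats *st = this_cpu_ptr(prog->aux->stats);
 *
 *	u64_stats_update_begin(&st->syncp);
 *	st->cnt++;
 *	st->nsecs += sched_clock() - start;
 *	u64_stats_update_end(&st->syncp);
 */
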
struct bpf_prog_aux {
	atomic_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	/* 'ownership' of prog_array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have
	 * the same prog_type and JITed flag
	 */
	enum bpf_prog_type owner_prog_type;
	bool owner_jited;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};
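
/* The flexible union at the end lets the array map family share this
 * layout: plain and percpu arrays store data inline in value[] resp.
 * per-cpu pointers in pptrs[], while prog/perf-event/map-in-map arrays
 * store kernel pointers in ptrs[]. A plain array lookup then reduces to
 * (sketch, mirroring array_map_lookup_elem in kernel/bpf/arraymap.c):
 *
 *	struct bpf_array *array = container_of(map, struct bpf_array, map);
 *	u32 index = *(u32 *)key;
 *
 *	if (unlikely(index >= array->map.max_entries))
 *		return NULL;
 *	return array->value + array->elem_size * (index & array->index_mask);
 */
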
#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[0];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		preempt_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr().
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		preempt_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
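
/* Typical refcounted usage of the prog get/put API from a subsystem
 * attach path (sketch of the common pattern, not tied to any particular
 * attach point; the prog type is illustrative):
 *
 *	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SCHED_CLS);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	... attach prog ...
 *	bpf_prog_put(prog);	// on detach or error, drop the reference
 */
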
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map,
						   bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, multiple-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(struct bpf_map *map);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}
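
/* bpf_long_memcpy() above is what the per-cpu copy helpers use to
 * snapshot long-sized counters per CPU, e.g. (sketch along the lines of
 * bpf_percpu_array_copy in kernel/bpf/arraymap.c):
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */
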
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
							   int i)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
							    u32 key)
{
	return NULL;
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
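
/* A driver that offloads programs typically creates one offload dev at
 * probe time and registers each capable netdev with it (sketch of the
 * pattern used by offload-capable drivers; bpf_dev, my_bpf_dev_ops and
 * priv are illustrative names):
 *
 *	bpf_dev = bpf_offload_dev_create(&my_bpf_dev_ops, priv);
 *	if (IS_ERR(bpf_dev))
 *		return PTR_ERR(bpf_dev);
 *	err = bpf_offload_dev_netdev_register(bpf_dev, netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(bpf_dev, netdev);
 *	bpf_offload_dev_destroy(bpf_dev);
 */
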
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_XDP_SOCKETS)
struct xdp_sock;
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
		       struct xdp_sock *xs);
void __xsk_map_flush(struct bpf_map *map);
#else
struct xdp_sock;
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
				     struct xdp_sock *xs)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(struct bpf_map *map)
{
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
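
/* A program type's ->get_func_proto() callback hands these protos out to
 * the verifier; the dispatch is typically a switch on the helper id, e.g.
 * (sketch modeled on the base helper dispatch in kernel/bpf/):
 *
 *	switch (func_id) {
 *	case BPF_FUNC_map_lookup_elem:
 *		return &bpf_map_lookup_elem_proto;
 *	case BPF_FUNC_get_prandom_u32:
 *		return &bpf_get_prandom_u32_proto;
 *	default:
 *		return NULL;
 *	}
 */
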
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

#endif /* _LINUX_BPF_H */