/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>

struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
};

struct bpf_map {
	/* 1st cacheline with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	u32 pages;
	u32 id;
	int numa_node;
	bool unpriv_array;
	/* 7 bytes hole */

	/* 2nd cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	struct user_struct *user ____cacheline_aligned;
	atomic_t refcnt;
	atomic_t usercnt;
	struct work_struct work;
	char name[BPF_OBJ_NAME_LEN];
};
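
/* Example (illustrative sketch, not part of this header): a new map type
 * typically fills in a bpf_map_ops table like the one below and registers
 * it via BPF_MAP_TYPE() in linux/bpf_types.h. All "mymap_*" names here are
 * hypothetical placeholders:
 *
 *	static const struct bpf_map_ops mymap_ops = {
 *		.map_alloc	  = mymap_alloc,
 *		.map_free	  = mymap_free,
 *		.map_get_next_key = mymap_get_next_key,
 *		.map_lookup_elem  = mymap_lookup_elem,
 *		.map_update_elem  = mymap_update_elem,
 *		.map_delete_elem  = mymap_delete_elem,
 *	};
 */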

struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory that does not need to be
				 * initialized; the helper function must fill
				 * all bytes or clear them in the error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	enum bpf_arg_type arg1_type;
	enum bpf_arg_type arg2_type;
	enum bpf_arg_type arg3_type;
	enum bpf_arg_type arg4_type;
	enum bpf_arg_type arg5_type;
};
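
/* Example (sketch): this is roughly how kernel/bpf/helpers.c describes the
 * bpf_map_lookup_elem() helper to the verifier; treat the exact flag values
 * as illustrative rather than authoritative:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */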

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	int ctx_field_size;
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};

struct bpf_prog_offload_ops {
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	const struct bpf_prog_offload_ops *dev_ops;
	void *jited_image;
	u32 jited_len;
};
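
/* Example (sketch; "myprog" and "struct my_ctx" are hypothetical names): a
 * minimal is_valid_access callback for bpf_verifier_ops that permits only
 * aligned 4-byte reads within the program's context structure:
 *
 *	static bool myprog_is_valid_access(int off, int size,
 *					   enum bpf_access_type type,
 *					   const struct bpf_prog *prog,
 *					   struct bpf_insn_access_aux *info)
 *	{
 *		if (type != BPF_READ)
 *			return false;
 *		if (off < 0 || off + size > sizeof(struct my_ctx))
 *			return false;
 *		return size == sizeof(__u32) && (off % size) == 0;
 *	}
 */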

struct bpf_prog_aux {
	atomic_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 stack_depth;
	u32 id;
	u32 func_cnt;
	bool offload_requested;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	/* 'ownership' of prog_array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have
	 * the same prog_type and JITed flag
	 */
	enum bpf_prog_type owner_prog_type;
	bool owner_jited;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define MAX_TAIL_CALL_CNT 32

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog *progs[0];
};

struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({							\
		struct bpf_prog **_prog, *__prog;		\
		struct bpf_prog_array *_array;			\
		u32 _ret = 1;					\
		preempt_disable();				\
		rcu_read_lock();				\
		_array = rcu_dereference(array);		\
		if (unlikely(check_non_null && !_array))	\
			goto _out;				\
		_prog = _array->progs;				\
		while ((__prog = READ_ONCE(*_prog))) {		\
			_ret &= func(__prog, ctx);		\
			_prog++;				\
		}						\
_out:								\
		rcu_read_unlock();				\
		preempt_enable_no_resched();			\
		_ret;						\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
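
/* Note (sketch): because results are combined with '&=', the macro
 * evaluates to 1 only if every program in the array returned 1; any single
 * program returning 0 makes the whole run return 0. A caller along the
 * lines of the cgroup-bpf filter path (names abbreviated, purely
 * illustrative) would do:
 *
 *	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, BPF_PROG_RUN);
 *	return ret == 1 ? 0 : -EPERM;
 */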

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);

/* memcpy that is used with 8-byte aligned pointers, multiple-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
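
/* Example (sketch): a typical caller copies per-cpu counters element by
 * element; "values", "pptr", "off" and "size" below are illustrative,
 * mirroring the pattern the percpu map copy helpers use:
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(values + off,
 *				per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */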

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
void __cpu_map_flush(struct bpf_map *map);
struct xdp_buff;
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return the map's NUMA node as specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
							   int i)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
#else
static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline int sock_map_prog(struct bpf_map *map,
				struct bpf_prog *prog,
				u32 type)
{
	return -EOPNOTSUPP;
}
#endif

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#endif /* _LINUX_BPF_H */