/*
 * Source: Linux kernel mirror, git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * File: apparently tools/lib/bpf/libbpf.c at tag v5.1-rc3 (3259 lines, 76 kB) — verify against upstream
 * kernel os linux
 */
1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 3/* 4 * Common eBPF ELF object loading operations. 5 * 6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> 7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> 8 * Copyright (C) 2015 Huawei Inc. 9 * Copyright (C) 2017 Nicira, Inc. 10 */ 11 12#ifndef _GNU_SOURCE 13#define _GNU_SOURCE 14#endif 15#include <stdlib.h> 16#include <stdio.h> 17#include <stdarg.h> 18#include <libgen.h> 19#include <inttypes.h> 20#include <string.h> 21#include <unistd.h> 22#include <fcntl.h> 23#include <errno.h> 24#include <asm/unistd.h> 25#include <linux/err.h> 26#include <linux/kernel.h> 27#include <linux/bpf.h> 28#include <linux/btf.h> 29#include <linux/filter.h> 30#include <linux/list.h> 31#include <linux/limits.h> 32#include <linux/perf_event.h> 33#include <linux/ring_buffer.h> 34#include <sys/stat.h> 35#include <sys/types.h> 36#include <sys/vfs.h> 37#include <tools/libc_compat.h> 38#include <libelf.h> 39#include <gelf.h> 40 41#include "libbpf.h" 42#include "bpf.h" 43#include "btf.h" 44#include "str_error.h" 45#include "libbpf_util.h" 46 47#ifndef EM_BPF 48#define EM_BPF 247 49#endif 50 51#ifndef BPF_FS_MAGIC 52#define BPF_FS_MAGIC 0xcafe4a11 53#endif 54 55#define __printf(a, b) __attribute__((format(printf, a, b))) 56 57static int __base_pr(enum libbpf_print_level level, const char *format, 58 va_list args) 59{ 60 if (level == LIBBPF_DEBUG) 61 return 0; 62 63 return vfprintf(stderr, format, args); 64} 65 66static libbpf_print_fn_t __libbpf_pr = __base_pr; 67 68void libbpf_set_print(libbpf_print_fn_t fn) 69{ 70 __libbpf_pr = fn; 71} 72 73__printf(2, 3) 74void libbpf_print(enum libbpf_print_level level, const char *format, ...) 
75{ 76 va_list args; 77 78 if (!__libbpf_pr) 79 return; 80 81 va_start(args, format); 82 __libbpf_pr(level, format, args); 83 va_end(args); 84} 85 86#define STRERR_BUFSIZE 128 87 88#define CHECK_ERR(action, err, out) do { \ 89 err = action; \ 90 if (err) \ 91 goto out; \ 92} while(0) 93 94 95/* Copied from tools/perf/util/util.h */ 96#ifndef zfree 97# define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) 98#endif 99 100#ifndef zclose 101# define zclose(fd) ({ \ 102 int ___err = 0; \ 103 if ((fd) >= 0) \ 104 ___err = close((fd)); \ 105 fd = -1; \ 106 ___err; }) 107#endif 108 109#ifdef HAVE_LIBELF_MMAP_SUPPORT 110# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP 111#else 112# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ 113#endif 114 115static inline __u64 ptr_to_u64(const void *ptr) 116{ 117 return (__u64) (unsigned long) ptr; 118} 119 120struct bpf_capabilities { 121 /* v4.14: kernel support for program & map names. */ 122 __u32 name:1; 123}; 124 125/* 126 * bpf_prog should be a better name but it has been used in 127 * linux/filter.h. 128 */ 129struct bpf_program { 130 /* Index in elf obj file, for relocation use. 
*/ 131 int idx; 132 char *name; 133 int prog_ifindex; 134 char *section_name; 135 /* section_name with / replaced by _; makes recursive pinning 136 * in bpf_object__pin_programs easier 137 */ 138 char *pin_name; 139 struct bpf_insn *insns; 140 size_t insns_cnt, main_prog_cnt; 141 enum bpf_prog_type type; 142 143 struct reloc_desc { 144 enum { 145 RELO_LD64, 146 RELO_CALL, 147 } type; 148 int insn_idx; 149 union { 150 int map_idx; 151 int text_off; 152 }; 153 } *reloc_desc; 154 int nr_reloc; 155 156 struct { 157 int nr; 158 int *fds; 159 } instances; 160 bpf_program_prep_t preprocessor; 161 162 struct bpf_object *obj; 163 void *priv; 164 bpf_program_clear_priv_t clear_priv; 165 166 enum bpf_attach_type expected_attach_type; 167 int btf_fd; 168 void *func_info; 169 __u32 func_info_rec_size; 170 __u32 func_info_cnt; 171 172 struct bpf_capabilities *caps; 173 174 void *line_info; 175 __u32 line_info_rec_size; 176 __u32 line_info_cnt; 177}; 178 179struct bpf_map { 180 int fd; 181 char *name; 182 size_t offset; 183 int map_ifindex; 184 int inner_map_fd; 185 struct bpf_map_def def; 186 __u32 btf_key_type_id; 187 __u32 btf_value_type_id; 188 void *priv; 189 bpf_map_clear_priv_t clear_priv; 190}; 191 192static LIST_HEAD(bpf_objects_list); 193 194struct bpf_object { 195 char license[64]; 196 __u32 kern_version; 197 198 struct bpf_program *programs; 199 size_t nr_programs; 200 struct bpf_map *maps; 201 size_t nr_maps; 202 203 bool loaded; 204 bool has_pseudo_calls; 205 206 /* 207 * Information when doing elf related work. Only valid if fd 208 * is valid. 209 */ 210 struct { 211 int fd; 212 void *obj_buf; 213 size_t obj_buf_sz; 214 Elf *elf; 215 GElf_Ehdr ehdr; 216 Elf_Data *symbols; 217 size_t strtabidx; 218 struct { 219 GElf_Shdr shdr; 220 Elf_Data *data; 221 } *reloc; 222 int nr_reloc; 223 int maps_shndx; 224 int text_shndx; 225 } efile; 226 /* 227 * All loaded bpf_object is linked in a list, which is 228 * hidden to caller. 
bpf_objects__<func> handlers deal with 229 * all objects. 230 */ 231 struct list_head list; 232 233 struct btf *btf; 234 struct btf_ext *btf_ext; 235 236 void *priv; 237 bpf_object_clear_priv_t clear_priv; 238 239 struct bpf_capabilities caps; 240 241 char path[]; 242}; 243#define obj_elf_valid(o) ((o)->efile.elf) 244 245void bpf_program__unload(struct bpf_program *prog) 246{ 247 int i; 248 249 if (!prog) 250 return; 251 252 /* 253 * If the object is opened but the program was never loaded, 254 * it is possible that prog->instances.nr == -1. 255 */ 256 if (prog->instances.nr > 0) { 257 for (i = 0; i < prog->instances.nr; i++) 258 zclose(prog->instances.fds[i]); 259 } else if (prog->instances.nr != -1) { 260 pr_warning("Internal error: instances.nr is %d\n", 261 prog->instances.nr); 262 } 263 264 prog->instances.nr = -1; 265 zfree(&prog->instances.fds); 266 267 zclose(prog->btf_fd); 268 zfree(&prog->func_info); 269 zfree(&prog->line_info); 270} 271 272static void bpf_program__exit(struct bpf_program *prog) 273{ 274 if (!prog) 275 return; 276 277 if (prog->clear_priv) 278 prog->clear_priv(prog, prog->priv); 279 280 prog->priv = NULL; 281 prog->clear_priv = NULL; 282 283 bpf_program__unload(prog); 284 zfree(&prog->name); 285 zfree(&prog->section_name); 286 zfree(&prog->pin_name); 287 zfree(&prog->insns); 288 zfree(&prog->reloc_desc); 289 290 prog->nr_reloc = 0; 291 prog->insns_cnt = 0; 292 prog->idx = -1; 293} 294 295static char *__bpf_program__pin_name(struct bpf_program *prog) 296{ 297 char *name, *p; 298 299 name = p = strdup(prog->section_name); 300 while ((p = strchr(p, '/'))) 301 *p = '_'; 302 303 return name; 304} 305 306static int 307bpf_program__init(void *data, size_t size, char *section_name, int idx, 308 struct bpf_program *prog) 309{ 310 if (size < sizeof(struct bpf_insn)) { 311 pr_warning("corrupted section '%s'\n", section_name); 312 return -EINVAL; 313 } 314 315 memset(prog, 0, sizeof(*prog)); 316 317 prog->section_name = strdup(section_name); 318 if 
(!prog->section_name) { 319 pr_warning("failed to alloc name for prog under section(%d) %s\n", 320 idx, section_name); 321 goto errout; 322 } 323 324 prog->pin_name = __bpf_program__pin_name(prog); 325 if (!prog->pin_name) { 326 pr_warning("failed to alloc pin name for prog under section(%d) %s\n", 327 idx, section_name); 328 goto errout; 329 } 330 331 prog->insns = malloc(size); 332 if (!prog->insns) { 333 pr_warning("failed to alloc insns for prog under section %s\n", 334 section_name); 335 goto errout; 336 } 337 prog->insns_cnt = size / sizeof(struct bpf_insn); 338 memcpy(prog->insns, data, 339 prog->insns_cnt * sizeof(struct bpf_insn)); 340 prog->idx = idx; 341 prog->instances.fds = NULL; 342 prog->instances.nr = -1; 343 prog->type = BPF_PROG_TYPE_UNSPEC; 344 prog->btf_fd = -1; 345 346 return 0; 347errout: 348 bpf_program__exit(prog); 349 return -ENOMEM; 350} 351 352static int 353bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, 354 char *section_name, int idx) 355{ 356 struct bpf_program prog, *progs; 357 int nr_progs, err; 358 359 err = bpf_program__init(data, size, section_name, idx, &prog); 360 if (err) 361 return err; 362 363 prog.caps = &obj->caps; 364 progs = obj->programs; 365 nr_progs = obj->nr_programs; 366 367 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0])); 368 if (!progs) { 369 /* 370 * In this case the original obj->programs 371 * is still valid, so don't need special treat for 372 * bpf_close_object(). 
373 */ 374 pr_warning("failed to alloc a new program under section '%s'\n", 375 section_name); 376 bpf_program__exit(&prog); 377 return -ENOMEM; 378 } 379 380 pr_debug("found program %s\n", prog.section_name); 381 obj->programs = progs; 382 obj->nr_programs = nr_progs + 1; 383 prog.obj = obj; 384 progs[nr_progs] = prog; 385 return 0; 386} 387 388static int 389bpf_object__init_prog_names(struct bpf_object *obj) 390{ 391 Elf_Data *symbols = obj->efile.symbols; 392 struct bpf_program *prog; 393 size_t pi, si; 394 395 for (pi = 0; pi < obj->nr_programs; pi++) { 396 const char *name = NULL; 397 398 prog = &obj->programs[pi]; 399 400 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; 401 si++) { 402 GElf_Sym sym; 403 404 if (!gelf_getsym(symbols, si, &sym)) 405 continue; 406 if (sym.st_shndx != prog->idx) 407 continue; 408 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL) 409 continue; 410 411 name = elf_strptr(obj->efile.elf, 412 obj->efile.strtabidx, 413 sym.st_name); 414 if (!name) { 415 pr_warning("failed to get sym name string for prog %s\n", 416 prog->section_name); 417 return -LIBBPF_ERRNO__LIBELF; 418 } 419 } 420 421 if (!name && prog->idx == obj->efile.text_shndx) 422 name = ".text"; 423 424 if (!name) { 425 pr_warning("failed to find sym for prog %s\n", 426 prog->section_name); 427 return -EINVAL; 428 } 429 430 prog->name = strdup(name); 431 if (!prog->name) { 432 pr_warning("failed to allocate memory for prog sym %s\n", 433 name); 434 return -ENOMEM; 435 } 436 } 437 438 return 0; 439} 440 441static struct bpf_object *bpf_object__new(const char *path, 442 void *obj_buf, 443 size_t obj_buf_sz) 444{ 445 struct bpf_object *obj; 446 447 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); 448 if (!obj) { 449 pr_warning("alloc memory failed for %s\n", path); 450 return ERR_PTR(-ENOMEM); 451 } 452 453 strcpy(obj->path, path); 454 obj->efile.fd = -1; 455 456 /* 457 * Caller of this function should also calls 458 * bpf_object__elf_finish() after data 
collection to return 459 * obj_buf to user. If not, we should duplicate the buffer to 460 * avoid user freeing them before elf finish. 461 */ 462 obj->efile.obj_buf = obj_buf; 463 obj->efile.obj_buf_sz = obj_buf_sz; 464 obj->efile.maps_shndx = -1; 465 466 obj->loaded = false; 467 468 INIT_LIST_HEAD(&obj->list); 469 list_add(&obj->list, &bpf_objects_list); 470 return obj; 471} 472 473static void bpf_object__elf_finish(struct bpf_object *obj) 474{ 475 if (!obj_elf_valid(obj)) 476 return; 477 478 if (obj->efile.elf) { 479 elf_end(obj->efile.elf); 480 obj->efile.elf = NULL; 481 } 482 obj->efile.symbols = NULL; 483 484 zfree(&obj->efile.reloc); 485 obj->efile.nr_reloc = 0; 486 zclose(obj->efile.fd); 487 obj->efile.obj_buf = NULL; 488 obj->efile.obj_buf_sz = 0; 489} 490 491static int bpf_object__elf_init(struct bpf_object *obj) 492{ 493 int err = 0; 494 GElf_Ehdr *ep; 495 496 if (obj_elf_valid(obj)) { 497 pr_warning("elf init: internal error\n"); 498 return -LIBBPF_ERRNO__LIBELF; 499 } 500 501 if (obj->efile.obj_buf_sz > 0) { 502 /* 503 * obj_buf should have been validated by 504 * bpf_object__open_buffer(). 
505 */ 506 obj->efile.elf = elf_memory(obj->efile.obj_buf, 507 obj->efile.obj_buf_sz); 508 } else { 509 obj->efile.fd = open(obj->path, O_RDONLY); 510 if (obj->efile.fd < 0) { 511 char errmsg[STRERR_BUFSIZE]; 512 char *cp = libbpf_strerror_r(errno, errmsg, 513 sizeof(errmsg)); 514 515 pr_warning("failed to open %s: %s\n", obj->path, cp); 516 return -errno; 517 } 518 519 obj->efile.elf = elf_begin(obj->efile.fd, 520 LIBBPF_ELF_C_READ_MMAP, 521 NULL); 522 } 523 524 if (!obj->efile.elf) { 525 pr_warning("failed to open %s as ELF file\n", 526 obj->path); 527 err = -LIBBPF_ERRNO__LIBELF; 528 goto errout; 529 } 530 531 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { 532 pr_warning("failed to get EHDR from %s\n", 533 obj->path); 534 err = -LIBBPF_ERRNO__FORMAT; 535 goto errout; 536 } 537 ep = &obj->efile.ehdr; 538 539 /* Old LLVM set e_machine to EM_NONE */ 540 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) { 541 pr_warning("%s is not an eBPF object file\n", 542 obj->path); 543 err = -LIBBPF_ERRNO__FORMAT; 544 goto errout; 545 } 546 547 return 0; 548errout: 549 bpf_object__elf_finish(obj); 550 return err; 551} 552 553static int 554bpf_object__check_endianness(struct bpf_object *obj) 555{ 556 static unsigned int const endian = 1; 557 558 switch (obj->efile.ehdr.e_ident[EI_DATA]) { 559 case ELFDATA2LSB: 560 /* We are big endian, BPF obj is little endian. */ 561 if (*(unsigned char const *)&endian != 1) 562 goto mismatch; 563 break; 564 565 case ELFDATA2MSB: 566 /* We are little endian, BPF obj is big endian. 
*/ 567 if (*(unsigned char const *)&endian != 0) 568 goto mismatch; 569 break; 570 default: 571 return -LIBBPF_ERRNO__ENDIAN; 572 } 573 574 return 0; 575 576mismatch: 577 pr_warning("Error: endianness mismatch.\n"); 578 return -LIBBPF_ERRNO__ENDIAN; 579} 580 581static int 582bpf_object__init_license(struct bpf_object *obj, 583 void *data, size_t size) 584{ 585 memcpy(obj->license, data, 586 min(size, sizeof(obj->license) - 1)); 587 pr_debug("license of %s is %s\n", obj->path, obj->license); 588 return 0; 589} 590 591static int 592bpf_object__init_kversion(struct bpf_object *obj, 593 void *data, size_t size) 594{ 595 __u32 kver; 596 597 if (size != sizeof(kver)) { 598 pr_warning("invalid kver section in %s\n", obj->path); 599 return -LIBBPF_ERRNO__FORMAT; 600 } 601 memcpy(&kver, data, sizeof(kver)); 602 obj->kern_version = kver; 603 pr_debug("kernel version of %s is %x\n", obj->path, 604 obj->kern_version); 605 return 0; 606} 607 608static int compare_bpf_map(const void *_a, const void *_b) 609{ 610 const struct bpf_map *a = _a; 611 const struct bpf_map *b = _b; 612 613 return a->offset - b->offset; 614} 615 616static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) 617{ 618 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || 619 type == BPF_MAP_TYPE_HASH_OF_MAPS) 620 return true; 621 return false; 622} 623 624static int 625bpf_object__init_maps(struct bpf_object *obj, int flags) 626{ 627 bool strict = !(flags & MAPS_RELAX_COMPAT); 628 int i, map_idx, map_def_sz, nr_maps = 0; 629 Elf_Scn *scn; 630 Elf_Data *data = NULL; 631 Elf_Data *symbols = obj->efile.symbols; 632 633 if (obj->efile.maps_shndx < 0) 634 return -EINVAL; 635 if (!symbols) 636 return -EINVAL; 637 638 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx); 639 if (scn) 640 data = elf_getdata(scn, NULL); 641 if (!scn || !data) { 642 pr_warning("failed to get Elf_Data from map section %d\n", 643 obj->efile.maps_shndx); 644 return -EINVAL; 645 } 646 647 /* 648 * Count number of maps. Each map has a name. 
649 * Array of maps is not supported: only the first element is 650 * considered. 651 * 652 * TODO: Detect array of map and report error. 653 */ 654 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { 655 GElf_Sym sym; 656 657 if (!gelf_getsym(symbols, i, &sym)) 658 continue; 659 if (sym.st_shndx != obj->efile.maps_shndx) 660 continue; 661 nr_maps++; 662 } 663 664 /* Alloc obj->maps and fill nr_maps. */ 665 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path, 666 nr_maps, data->d_size); 667 668 if (!nr_maps) 669 return 0; 670 671 /* Assume equally sized map definitions */ 672 map_def_sz = data->d_size / nr_maps; 673 if (!data->d_size || (data->d_size % nr_maps) != 0) { 674 pr_warning("unable to determine map definition size " 675 "section %s, %d maps in %zd bytes\n", 676 obj->path, nr_maps, data->d_size); 677 return -EINVAL; 678 } 679 680 obj->maps = calloc(nr_maps, sizeof(obj->maps[0])); 681 if (!obj->maps) { 682 pr_warning("alloc maps for object failed\n"); 683 return -ENOMEM; 684 } 685 obj->nr_maps = nr_maps; 686 687 for (i = 0; i < nr_maps; i++) { 688 /* 689 * fill all fd with -1 so won't close incorrect 690 * fd (fd=0 is stdin) when failure (zclose won't close 691 * negative fd)). 692 */ 693 obj->maps[i].fd = -1; 694 obj->maps[i].inner_map_fd = -1; 695 } 696 697 /* 698 * Fill obj->maps using data in "maps" section. 
699 */ 700 for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { 701 GElf_Sym sym; 702 const char *map_name; 703 struct bpf_map_def *def; 704 705 if (!gelf_getsym(symbols, i, &sym)) 706 continue; 707 if (sym.st_shndx != obj->efile.maps_shndx) 708 continue; 709 710 map_name = elf_strptr(obj->efile.elf, 711 obj->efile.strtabidx, 712 sym.st_name); 713 obj->maps[map_idx].offset = sym.st_value; 714 if (sym.st_value + map_def_sz > data->d_size) { 715 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n", 716 obj->path, map_name); 717 return -EINVAL; 718 } 719 720 obj->maps[map_idx].name = strdup(map_name); 721 if (!obj->maps[map_idx].name) { 722 pr_warning("failed to alloc map name\n"); 723 return -ENOMEM; 724 } 725 pr_debug("map %d is \"%s\"\n", map_idx, 726 obj->maps[map_idx].name); 727 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); 728 /* 729 * If the definition of the map in the object file fits in 730 * bpf_map_def, copy it. Any extra fields in our version 731 * of bpf_map_def will default to zero as a result of the 732 * calloc above. 733 */ 734 if (map_def_sz <= sizeof(struct bpf_map_def)) { 735 memcpy(&obj->maps[map_idx].def, def, map_def_sz); 736 } else { 737 /* 738 * Here the map structure being read is bigger than what 739 * we expect, truncate if the excess bits are all zero. 740 * If they are not zero, reject this map as 741 * incompatible. 
742 */ 743 char *b; 744 for (b = ((char *)def) + sizeof(struct bpf_map_def); 745 b < ((char *)def) + map_def_sz; b++) { 746 if (*b != 0) { 747 pr_warning("maps section in %s: \"%s\" " 748 "has unrecognized, non-zero " 749 "options\n", 750 obj->path, map_name); 751 if (strict) 752 return -EINVAL; 753 } 754 } 755 memcpy(&obj->maps[map_idx].def, def, 756 sizeof(struct bpf_map_def)); 757 } 758 map_idx++; 759 } 760 761 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map); 762 return 0; 763} 764 765static bool section_have_execinstr(struct bpf_object *obj, int idx) 766{ 767 Elf_Scn *scn; 768 GElf_Shdr sh; 769 770 scn = elf_getscn(obj->efile.elf, idx); 771 if (!scn) 772 return false; 773 774 if (gelf_getshdr(scn, &sh) != &sh) 775 return false; 776 777 if (sh.sh_flags & SHF_EXECINSTR) 778 return true; 779 780 return false; 781} 782 783static int bpf_object__elf_collect(struct bpf_object *obj, int flags) 784{ 785 Elf *elf = obj->efile.elf; 786 GElf_Ehdr *ep = &obj->efile.ehdr; 787 Elf_Data *btf_ext_data = NULL; 788 Elf_Scn *scn = NULL; 789 int idx = 0, err = 0; 790 791 /* Elf is corrupted/truncated, avoid calling elf_strptr. 
*/ 792 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { 793 pr_warning("failed to get e_shstrndx from %s\n", 794 obj->path); 795 return -LIBBPF_ERRNO__FORMAT; 796 } 797 798 while ((scn = elf_nextscn(elf, scn)) != NULL) { 799 char *name; 800 GElf_Shdr sh; 801 Elf_Data *data; 802 803 idx++; 804 if (gelf_getshdr(scn, &sh) != &sh) { 805 pr_warning("failed to get section(%d) header from %s\n", 806 idx, obj->path); 807 err = -LIBBPF_ERRNO__FORMAT; 808 goto out; 809 } 810 811 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); 812 if (!name) { 813 pr_warning("failed to get section(%d) name from %s\n", 814 idx, obj->path); 815 err = -LIBBPF_ERRNO__FORMAT; 816 goto out; 817 } 818 819 data = elf_getdata(scn, 0); 820 if (!data) { 821 pr_warning("failed to get section(%d) data from %s(%s)\n", 822 idx, name, obj->path); 823 err = -LIBBPF_ERRNO__FORMAT; 824 goto out; 825 } 826 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", 827 idx, name, (unsigned long)data->d_size, 828 (int)sh.sh_link, (unsigned long)sh.sh_flags, 829 (int)sh.sh_type); 830 831 if (strcmp(name, "license") == 0) 832 err = bpf_object__init_license(obj, 833 data->d_buf, 834 data->d_size); 835 else if (strcmp(name, "version") == 0) 836 err = bpf_object__init_kversion(obj, 837 data->d_buf, 838 data->d_size); 839 else if (strcmp(name, "maps") == 0) 840 obj->efile.maps_shndx = idx; 841 else if (strcmp(name, BTF_ELF_SEC) == 0) { 842 obj->btf = btf__new(data->d_buf, data->d_size); 843 if (IS_ERR(obj->btf)) { 844 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", 845 BTF_ELF_SEC, PTR_ERR(obj->btf)); 846 obj->btf = NULL; 847 continue; 848 } 849 err = btf__load(obj->btf); 850 if (err) { 851 pr_warning("Error loading %s into kernel: %d. 
Ignored and continue.\n", 852 BTF_ELF_SEC, err); 853 btf__free(obj->btf); 854 obj->btf = NULL; 855 err = 0; 856 } 857 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { 858 btf_ext_data = data; 859 } else if (sh.sh_type == SHT_SYMTAB) { 860 if (obj->efile.symbols) { 861 pr_warning("bpf: multiple SYMTAB in %s\n", 862 obj->path); 863 err = -LIBBPF_ERRNO__FORMAT; 864 } else { 865 obj->efile.symbols = data; 866 obj->efile.strtabidx = sh.sh_link; 867 } 868 } else if ((sh.sh_type == SHT_PROGBITS) && 869 (sh.sh_flags & SHF_EXECINSTR) && 870 (data->d_size > 0)) { 871 if (strcmp(name, ".text") == 0) 872 obj->efile.text_shndx = idx; 873 err = bpf_object__add_program(obj, data->d_buf, 874 data->d_size, name, idx); 875 if (err) { 876 char errmsg[STRERR_BUFSIZE]; 877 char *cp = libbpf_strerror_r(-err, errmsg, 878 sizeof(errmsg)); 879 880 pr_warning("failed to alloc program %s (%s): %s", 881 name, obj->path, cp); 882 } 883 } else if (sh.sh_type == SHT_REL) { 884 void *reloc = obj->efile.reloc; 885 int nr_reloc = obj->efile.nr_reloc + 1; 886 int sec = sh.sh_info; /* points to other section */ 887 888 /* Only do relo for section with exec instructions */ 889 if (!section_have_execinstr(obj, sec)) { 890 pr_debug("skip relo %s(%d) for section(%d)\n", 891 name, idx, sec); 892 continue; 893 } 894 895 reloc = reallocarray(reloc, nr_reloc, 896 sizeof(*obj->efile.reloc)); 897 if (!reloc) { 898 pr_warning("realloc failed\n"); 899 err = -ENOMEM; 900 } else { 901 int n = nr_reloc - 1; 902 903 obj->efile.reloc = reloc; 904 obj->efile.nr_reloc = nr_reloc; 905 906 obj->efile.reloc[n].shdr = sh; 907 obj->efile.reloc[n].data = data; 908 } 909 } else { 910 pr_debug("skip section(%d) %s\n", idx, name); 911 } 912 if (err) 913 goto out; 914 } 915 916 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { 917 pr_warning("Corrupted ELF file: index of strtab invalid\n"); 918 return LIBBPF_ERRNO__FORMAT; 919 } 920 if (btf_ext_data) { 921 if (!obj->btf) { 922 pr_debug("Ignore ELF section %s because 
its depending ELF section %s is not found.\n", 923 BTF_EXT_ELF_SEC, BTF_ELF_SEC); 924 } else { 925 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, 926 btf_ext_data->d_size); 927 if (IS_ERR(obj->btf_ext)) { 928 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", 929 BTF_EXT_ELF_SEC, 930 PTR_ERR(obj->btf_ext)); 931 obj->btf_ext = NULL; 932 } 933 } 934 } 935 if (obj->efile.maps_shndx >= 0) { 936 err = bpf_object__init_maps(obj, flags); 937 if (err) 938 goto out; 939 } 940 err = bpf_object__init_prog_names(obj); 941out: 942 return err; 943} 944 945static struct bpf_program * 946bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx) 947{ 948 struct bpf_program *prog; 949 size_t i; 950 951 for (i = 0; i < obj->nr_programs; i++) { 952 prog = &obj->programs[i]; 953 if (prog->idx == idx) 954 return prog; 955 } 956 return NULL; 957} 958 959struct bpf_program * 960bpf_object__find_program_by_title(struct bpf_object *obj, const char *title) 961{ 962 struct bpf_program *pos; 963 964 bpf_object__for_each_program(pos, obj) { 965 if (pos->section_name && !strcmp(pos->section_name, title)) 966 return pos; 967 } 968 return NULL; 969} 970 971static int 972bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, 973 Elf_Data *data, struct bpf_object *obj) 974{ 975 Elf_Data *symbols = obj->efile.symbols; 976 int text_shndx = obj->efile.text_shndx; 977 int maps_shndx = obj->efile.maps_shndx; 978 struct bpf_map *maps = obj->maps; 979 size_t nr_maps = obj->nr_maps; 980 int i, nrels; 981 982 pr_debug("collecting relocating info for: '%s'\n", 983 prog->section_name); 984 nrels = shdr->sh_size / shdr->sh_entsize; 985 986 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels); 987 if (!prog->reloc_desc) { 988 pr_warning("failed to alloc memory in relocation\n"); 989 return -ENOMEM; 990 } 991 prog->nr_reloc = nrels; 992 993 for (i = 0; i < nrels; i++) { 994 GElf_Sym sym; 995 GElf_Rel rel; 996 unsigned int insn_idx; 997 struct bpf_insn *insns = 
prog->insns; 998 size_t map_idx; 999 1000 if (!gelf_getrel(data, i, &rel)) { 1001 pr_warning("relocation: failed to get %d reloc\n", i); 1002 return -LIBBPF_ERRNO__FORMAT; 1003 } 1004 1005 if (!gelf_getsym(symbols, 1006 GELF_R_SYM(rel.r_info), 1007 &sym)) { 1008 pr_warning("relocation: symbol %"PRIx64" not found\n", 1009 GELF_R_SYM(rel.r_info)); 1010 return -LIBBPF_ERRNO__FORMAT; 1011 } 1012 pr_debug("relo for %lld value %lld name %d\n", 1013 (long long) (rel.r_info >> 32), 1014 (long long) sym.st_value, sym.st_name); 1015 1016 if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) { 1017 pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n", 1018 prog->section_name, sym.st_shndx); 1019 return -LIBBPF_ERRNO__RELOC; 1020 } 1021 1022 insn_idx = rel.r_offset / sizeof(struct bpf_insn); 1023 pr_debug("relocation: insn_idx=%u\n", insn_idx); 1024 1025 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) { 1026 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) { 1027 pr_warning("incorrect bpf_call opcode\n"); 1028 return -LIBBPF_ERRNO__RELOC; 1029 } 1030 prog->reloc_desc[i].type = RELO_CALL; 1031 prog->reloc_desc[i].insn_idx = insn_idx; 1032 prog->reloc_desc[i].text_off = sym.st_value; 1033 obj->has_pseudo_calls = true; 1034 continue; 1035 } 1036 1037 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { 1038 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", 1039 insn_idx, insns[insn_idx].code); 1040 return -LIBBPF_ERRNO__RELOC; 1041 } 1042 1043 /* TODO: 'maps' is sorted. We can use bsearch to make it faster. 
*/ 1044 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 1045 if (maps[map_idx].offset == sym.st_value) { 1046 pr_debug("relocation: find map %zd (%s) for insn %u\n", 1047 map_idx, maps[map_idx].name, insn_idx); 1048 break; 1049 } 1050 } 1051 1052 if (map_idx >= nr_maps) { 1053 pr_warning("bpf relocation: map_idx %d large than %d\n", 1054 (int)map_idx, (int)nr_maps - 1); 1055 return -LIBBPF_ERRNO__RELOC; 1056 } 1057 1058 prog->reloc_desc[i].type = RELO_LD64; 1059 prog->reloc_desc[i].insn_idx = insn_idx; 1060 prog->reloc_desc[i].map_idx = map_idx; 1061 } 1062 return 0; 1063} 1064 1065static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) 1066{ 1067 struct bpf_map_def *def = &map->def; 1068 __u32 key_type_id, value_type_id; 1069 int ret; 1070 1071 ret = btf__get_map_kv_tids(btf, map->name, def->key_size, 1072 def->value_size, &key_type_id, 1073 &value_type_id); 1074 if (ret) 1075 return ret; 1076 1077 map->btf_key_type_id = key_type_id; 1078 map->btf_value_type_id = value_type_id; 1079 1080 return 0; 1081} 1082 1083int bpf_map__reuse_fd(struct bpf_map *map, int fd) 1084{ 1085 struct bpf_map_info info = {}; 1086 __u32 len = sizeof(info); 1087 int new_fd, err; 1088 char *new_name; 1089 1090 err = bpf_obj_get_info_by_fd(fd, &info, &len); 1091 if (err) 1092 return err; 1093 1094 new_name = strdup(info.name); 1095 if (!new_name) 1096 return -errno; 1097 1098 new_fd = open("/", O_RDONLY | O_CLOEXEC); 1099 if (new_fd < 0) 1100 goto err_free_new_name; 1101 1102 new_fd = dup3(fd, new_fd, O_CLOEXEC); 1103 if (new_fd < 0) 1104 goto err_close_new_fd; 1105 1106 err = zclose(map->fd); 1107 if (err) 1108 goto err_close_new_fd; 1109 free(map->name); 1110 1111 map->fd = new_fd; 1112 map->name = new_name; 1113 map->def.type = info.type; 1114 map->def.key_size = info.key_size; 1115 map->def.value_size = info.value_size; 1116 map->def.max_entries = info.max_entries; 1117 map->def.map_flags = info.map_flags; 1118 map->btf_key_type_id = info.btf_key_type_id; 1119 
map->btf_value_type_id = info.btf_value_type_id; 1120 1121 return 0; 1122 1123err_close_new_fd: 1124 close(new_fd); 1125err_free_new_name: 1126 free(new_name); 1127 return -errno; 1128} 1129 1130int bpf_map__resize(struct bpf_map *map, __u32 max_entries) 1131{ 1132 if (!map || !max_entries) 1133 return -EINVAL; 1134 1135 /* If map already created, its attributes can't be changed. */ 1136 if (map->fd >= 0) 1137 return -EBUSY; 1138 1139 map->def.max_entries = max_entries; 1140 1141 return 0; 1142} 1143 1144static int 1145bpf_object__probe_name(struct bpf_object *obj) 1146{ 1147 struct bpf_load_program_attr attr; 1148 char *cp, errmsg[STRERR_BUFSIZE]; 1149 struct bpf_insn insns[] = { 1150 BPF_MOV64_IMM(BPF_REG_0, 0), 1151 BPF_EXIT_INSN(), 1152 }; 1153 int ret; 1154 1155 /* make sure basic loading works */ 1156 1157 memset(&attr, 0, sizeof(attr)); 1158 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 1159 attr.insns = insns; 1160 attr.insns_cnt = ARRAY_SIZE(insns); 1161 attr.license = "GPL"; 1162 1163 ret = bpf_load_program_xattr(&attr, NULL, 0); 1164 if (ret < 0) { 1165 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1166 pr_warning("Error in %s():%s(%d). 
Couldn't load basic 'r0 = 0' BPF program.\n", 1167 __func__, cp, errno); 1168 return -errno; 1169 } 1170 close(ret); 1171 1172 /* now try the same program, but with the name */ 1173 1174 attr.name = "test"; 1175 ret = bpf_load_program_xattr(&attr, NULL, 0); 1176 if (ret >= 0) { 1177 obj->caps.name = 1; 1178 close(ret); 1179 } 1180 1181 return 0; 1182} 1183 1184static int 1185bpf_object__probe_caps(struct bpf_object *obj) 1186{ 1187 return bpf_object__probe_name(obj); 1188} 1189 1190static int 1191bpf_object__create_maps(struct bpf_object *obj) 1192{ 1193 struct bpf_create_map_attr create_attr = {}; 1194 unsigned int i; 1195 int err; 1196 1197 for (i = 0; i < obj->nr_maps; i++) { 1198 struct bpf_map *map = &obj->maps[i]; 1199 struct bpf_map_def *def = &map->def; 1200 char *cp, errmsg[STRERR_BUFSIZE]; 1201 int *pfd = &map->fd; 1202 1203 if (map->fd >= 0) { 1204 pr_debug("skip map create (preset) %s: fd=%d\n", 1205 map->name, map->fd); 1206 continue; 1207 } 1208 1209 if (obj->caps.name) 1210 create_attr.name = map->name; 1211 create_attr.map_ifindex = map->map_ifindex; 1212 create_attr.map_type = def->type; 1213 create_attr.map_flags = def->map_flags; 1214 create_attr.key_size = def->key_size; 1215 create_attr.value_size = def->value_size; 1216 create_attr.max_entries = def->max_entries; 1217 create_attr.btf_fd = 0; 1218 create_attr.btf_key_type_id = 0; 1219 create_attr.btf_value_type_id = 0; 1220 if (bpf_map_type__is_map_in_map(def->type) && 1221 map->inner_map_fd >= 0) 1222 create_attr.inner_map_fd = map->inner_map_fd; 1223 1224 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) { 1225 create_attr.btf_fd = btf__fd(obj->btf); 1226 create_attr.btf_key_type_id = map->btf_key_type_id; 1227 create_attr.btf_value_type_id = map->btf_value_type_id; 1228 } 1229 1230 *pfd = bpf_create_map_xattr(&create_attr); 1231 if (*pfd < 0 && create_attr.btf_key_type_id) { 1232 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1233 pr_warning("Error in 
bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", 1234 map->name, cp, errno); 1235 create_attr.btf_fd = 0; 1236 create_attr.btf_key_type_id = 0; 1237 create_attr.btf_value_type_id = 0; 1238 map->btf_key_type_id = 0; 1239 map->btf_value_type_id = 0; 1240 *pfd = bpf_create_map_xattr(&create_attr); 1241 } 1242 1243 if (*pfd < 0) { 1244 size_t j; 1245 1246 err = *pfd; 1247 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1248 pr_warning("failed to create map (name: '%s'): %s\n", 1249 map->name, cp); 1250 for (j = 0; j < i; j++) 1251 zclose(obj->maps[j].fd); 1252 return err; 1253 } 1254 pr_debug("create map %s: fd=%d\n", map->name, *pfd); 1255 } 1256 1257 return 0; 1258} 1259 1260static int 1261check_btf_ext_reloc_err(struct bpf_program *prog, int err, 1262 void *btf_prog_info, const char *info_name) 1263{ 1264 if (err != -ENOENT) { 1265 pr_warning("Error in loading %s for sec %s.\n", 1266 info_name, prog->section_name); 1267 return err; 1268 } 1269 1270 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */ 1271 1272 if (btf_prog_info) { 1273 /* 1274 * Some info has already been found but has problem 1275 * in the last btf_ext reloc. Must have to error 1276 * out. 1277 */ 1278 pr_warning("Error in relocating %s for sec %s.\n", 1279 info_name, prog->section_name); 1280 return err; 1281 } 1282 1283 /* 1284 * Have problem loading the very first info. Ignore 1285 * the rest. 1286 */ 1287 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n", 1288 info_name, prog->section_name, info_name); 1289 return 0; 1290} 1291 1292static int 1293bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj, 1294 const char *section_name, __u32 insn_offset) 1295{ 1296 int err; 1297 1298 if (!insn_offset || prog->func_info) { 1299 /* 1300 * !insn_offset => main program 1301 * 1302 * For sub prog, the main program's func_info has to 1303 * be loaded first (i.e. 
prog->func_info != NULL) 1304 */ 1305 err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext, 1306 section_name, insn_offset, 1307 &prog->func_info, 1308 &prog->func_info_cnt); 1309 if (err) 1310 return check_btf_ext_reloc_err(prog, err, 1311 prog->func_info, 1312 "bpf_func_info"); 1313 1314 prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext); 1315 } 1316 1317 if (!insn_offset || prog->line_info) { 1318 err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext, 1319 section_name, insn_offset, 1320 &prog->line_info, 1321 &prog->line_info_cnt); 1322 if (err) 1323 return check_btf_ext_reloc_err(prog, err, 1324 prog->line_info, 1325 "bpf_line_info"); 1326 1327 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); 1328 } 1329 1330 if (!insn_offset) 1331 prog->btf_fd = btf__fd(obj->btf); 1332 1333 return 0; 1334} 1335 1336static int 1337bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, 1338 struct reloc_desc *relo) 1339{ 1340 struct bpf_insn *insn, *new_insn; 1341 struct bpf_program *text; 1342 size_t new_cnt; 1343 int err; 1344 1345 if (relo->type != RELO_CALL) 1346 return -LIBBPF_ERRNO__RELOC; 1347 1348 if (prog->idx == obj->efile.text_shndx) { 1349 pr_warning("relo in .text insn %d into off %d\n", 1350 relo->insn_idx, relo->text_off); 1351 return -LIBBPF_ERRNO__RELOC; 1352 } 1353 1354 if (prog->main_prog_cnt == 0) { 1355 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx); 1356 if (!text) { 1357 pr_warning("no .text section found yet relo into text exist\n"); 1358 return -LIBBPF_ERRNO__RELOC; 1359 } 1360 new_cnt = prog->insns_cnt + text->insns_cnt; 1361 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn)); 1362 if (!new_insn) { 1363 pr_warning("oom in prog realloc\n"); 1364 return -ENOMEM; 1365 } 1366 1367 if (obj->btf_ext) { 1368 err = bpf_program_reloc_btf_ext(prog, obj, 1369 text->section_name, 1370 prog->insns_cnt); 1371 if (err) 1372 return err; 1373 } 1374 1375 memcpy(new_insn + 
prog->insns_cnt, text->insns, 1376 text->insns_cnt * sizeof(*insn)); 1377 prog->insns = new_insn; 1378 prog->main_prog_cnt = prog->insns_cnt; 1379 prog->insns_cnt = new_cnt; 1380 pr_debug("added %zd insn from %s to prog %s\n", 1381 text->insns_cnt, text->section_name, 1382 prog->section_name); 1383 } 1384 insn = &prog->insns[relo->insn_idx]; 1385 insn->imm += prog->main_prog_cnt - relo->insn_idx; 1386 return 0; 1387} 1388 1389static int 1390bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) 1391{ 1392 int i, err; 1393 1394 if (!prog) 1395 return 0; 1396 1397 if (obj->btf_ext) { 1398 err = bpf_program_reloc_btf_ext(prog, obj, 1399 prog->section_name, 0); 1400 if (err) 1401 return err; 1402 } 1403 1404 if (!prog->reloc_desc) 1405 return 0; 1406 1407 for (i = 0; i < prog->nr_reloc; i++) { 1408 if (prog->reloc_desc[i].type == RELO_LD64) { 1409 struct bpf_insn *insns = prog->insns; 1410 int insn_idx, map_idx; 1411 1412 insn_idx = prog->reloc_desc[i].insn_idx; 1413 map_idx = prog->reloc_desc[i].map_idx; 1414 1415 if (insn_idx >= (int)prog->insns_cnt) { 1416 pr_warning("relocation out of range: '%s'\n", 1417 prog->section_name); 1418 return -LIBBPF_ERRNO__RELOC; 1419 } 1420 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; 1421 insns[insn_idx].imm = obj->maps[map_idx].fd; 1422 } else { 1423 err = bpf_program__reloc_text(prog, obj, 1424 &prog->reloc_desc[i]); 1425 if (err) 1426 return err; 1427 } 1428 } 1429 1430 zfree(&prog->reloc_desc); 1431 prog->nr_reloc = 0; 1432 return 0; 1433} 1434 1435 1436static int 1437bpf_object__relocate(struct bpf_object *obj) 1438{ 1439 struct bpf_program *prog; 1440 size_t i; 1441 int err; 1442 1443 for (i = 0; i < obj->nr_programs; i++) { 1444 prog = &obj->programs[i]; 1445 1446 err = bpf_program__relocate(prog, obj); 1447 if (err) { 1448 pr_warning("failed to relocate '%s'\n", 1449 prog->section_name); 1450 return err; 1451 } 1452 } 1453 return 0; 1454} 1455 1456static int bpf_object__collect_reloc(struct bpf_object *obj) 
1457{ 1458 int i, err; 1459 1460 if (!obj_elf_valid(obj)) { 1461 pr_warning("Internal error: elf object is closed\n"); 1462 return -LIBBPF_ERRNO__INTERNAL; 1463 } 1464 1465 for (i = 0; i < obj->efile.nr_reloc; i++) { 1466 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr; 1467 Elf_Data *data = obj->efile.reloc[i].data; 1468 int idx = shdr->sh_info; 1469 struct bpf_program *prog; 1470 1471 if (shdr->sh_type != SHT_REL) { 1472 pr_warning("internal error at %d\n", __LINE__); 1473 return -LIBBPF_ERRNO__INTERNAL; 1474 } 1475 1476 prog = bpf_object__find_prog_by_idx(obj, idx); 1477 if (!prog) { 1478 pr_warning("relocation failed: no section(%d)\n", idx); 1479 return -LIBBPF_ERRNO__RELOC; 1480 } 1481 1482 err = bpf_program__collect_reloc(prog, 1483 shdr, data, 1484 obj); 1485 if (err) 1486 return err; 1487 } 1488 return 0; 1489} 1490 1491static int 1492load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, 1493 char *license, __u32 kern_version, int *pfd) 1494{ 1495 struct bpf_load_program_attr load_attr; 1496 char *cp, errmsg[STRERR_BUFSIZE]; 1497 char *log_buf; 1498 int ret; 1499 1500 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); 1501 load_attr.prog_type = prog->type; 1502 load_attr.expected_attach_type = prog->expected_attach_type; 1503 if (prog->caps->name) 1504 load_attr.name = prog->name; 1505 load_attr.insns = insns; 1506 load_attr.insns_cnt = insns_cnt; 1507 load_attr.license = license; 1508 load_attr.kern_version = kern_version; 1509 load_attr.prog_ifindex = prog->prog_ifindex; 1510 load_attr.prog_btf_fd = prog->btf_fd >= 0 ? 
prog->btf_fd : 0; 1511 load_attr.func_info = prog->func_info; 1512 load_attr.func_info_rec_size = prog->func_info_rec_size; 1513 load_attr.func_info_cnt = prog->func_info_cnt; 1514 load_attr.line_info = prog->line_info; 1515 load_attr.line_info_rec_size = prog->line_info_rec_size; 1516 load_attr.line_info_cnt = prog->line_info_cnt; 1517 if (!load_attr.insns || !load_attr.insns_cnt) 1518 return -EINVAL; 1519 1520 log_buf = malloc(BPF_LOG_BUF_SIZE); 1521 if (!log_buf) 1522 pr_warning("Alloc log buffer for bpf loader error, continue without log\n"); 1523 1524 ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE); 1525 1526 if (ret >= 0) { 1527 *pfd = ret; 1528 ret = 0; 1529 goto out; 1530 } 1531 1532 ret = -LIBBPF_ERRNO__LOAD; 1533 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1534 pr_warning("load bpf program failed: %s\n", cp); 1535 1536 if (log_buf && log_buf[0] != '\0') { 1537 ret = -LIBBPF_ERRNO__VERIFY; 1538 pr_warning("-- BEGIN DUMP LOG ---\n"); 1539 pr_warning("\n%s\n", log_buf); 1540 pr_warning("-- END LOG --\n"); 1541 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) { 1542 pr_warning("Program too large (%zu insns), at most %d insns\n", 1543 load_attr.insns_cnt, BPF_MAXINSNS); 1544 ret = -LIBBPF_ERRNO__PROG2BIG; 1545 } else { 1546 /* Wrong program type? 
*/ 1547 if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { 1548 int fd; 1549 1550 load_attr.prog_type = BPF_PROG_TYPE_KPROBE; 1551 load_attr.expected_attach_type = 0; 1552 fd = bpf_load_program_xattr(&load_attr, NULL, 0); 1553 if (fd >= 0) { 1554 close(fd); 1555 ret = -LIBBPF_ERRNO__PROGTYPE; 1556 goto out; 1557 } 1558 } 1559 1560 if (log_buf) 1561 ret = -LIBBPF_ERRNO__KVER; 1562 } 1563 1564out: 1565 free(log_buf); 1566 return ret; 1567} 1568 1569int 1570bpf_program__load(struct bpf_program *prog, 1571 char *license, __u32 kern_version) 1572{ 1573 int err = 0, fd, i; 1574 1575 if (prog->instances.nr < 0 || !prog->instances.fds) { 1576 if (prog->preprocessor) { 1577 pr_warning("Internal error: can't load program '%s'\n", 1578 prog->section_name); 1579 return -LIBBPF_ERRNO__INTERNAL; 1580 } 1581 1582 prog->instances.fds = malloc(sizeof(int)); 1583 if (!prog->instances.fds) { 1584 pr_warning("Not enough memory for BPF fds\n"); 1585 return -ENOMEM; 1586 } 1587 prog->instances.nr = 1; 1588 prog->instances.fds[0] = -1; 1589 } 1590 1591 if (!prog->preprocessor) { 1592 if (prog->instances.nr != 1) { 1593 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n", 1594 prog->section_name, prog->instances.nr); 1595 } 1596 err = load_program(prog, prog->insns, prog->insns_cnt, 1597 license, kern_version, &fd); 1598 if (!err) 1599 prog->instances.fds[0] = fd; 1600 goto out; 1601 } 1602 1603 for (i = 0; i < prog->instances.nr; i++) { 1604 struct bpf_prog_prep_result result; 1605 bpf_program_prep_t preprocessor = prog->preprocessor; 1606 1607 memset(&result, 0, sizeof(result)); 1608 err = preprocessor(prog, i, prog->insns, 1609 prog->insns_cnt, &result); 1610 if (err) { 1611 pr_warning("Preprocessing the %dth instance of program '%s' failed\n", 1612 i, prog->section_name); 1613 goto out; 1614 } 1615 1616 if (!result.new_insn_ptr || !result.new_insn_cnt) { 1617 pr_debug("Skip loading the %dth instance of program '%s'\n", 1618 i, prog->section_name); 1619 prog->instances.fds[i] = 
-1; 1620 if (result.pfd) 1621 *result.pfd = -1; 1622 continue; 1623 } 1624 1625 err = load_program(prog, result.new_insn_ptr, 1626 result.new_insn_cnt, 1627 license, kern_version, &fd); 1628 1629 if (err) { 1630 pr_warning("Loading the %dth instance of program '%s' failed\n", 1631 i, prog->section_name); 1632 goto out; 1633 } 1634 1635 if (result.pfd) 1636 *result.pfd = fd; 1637 prog->instances.fds[i] = fd; 1638 } 1639out: 1640 if (err) 1641 pr_warning("failed to load program '%s'\n", 1642 prog->section_name); 1643 zfree(&prog->insns); 1644 prog->insns_cnt = 0; 1645 return err; 1646} 1647 1648static bool bpf_program__is_function_storage(struct bpf_program *prog, 1649 struct bpf_object *obj) 1650{ 1651 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls; 1652} 1653 1654static int 1655bpf_object__load_progs(struct bpf_object *obj) 1656{ 1657 size_t i; 1658 int err; 1659 1660 for (i = 0; i < obj->nr_programs; i++) { 1661 if (bpf_program__is_function_storage(&obj->programs[i], obj)) 1662 continue; 1663 err = bpf_program__load(&obj->programs[i], 1664 obj->license, 1665 obj->kern_version); 1666 if (err) 1667 return err; 1668 } 1669 return 0; 1670} 1671 1672static bool bpf_prog_type__needs_kver(enum bpf_prog_type type) 1673{ 1674 switch (type) { 1675 case BPF_PROG_TYPE_SOCKET_FILTER: 1676 case BPF_PROG_TYPE_SCHED_CLS: 1677 case BPF_PROG_TYPE_SCHED_ACT: 1678 case BPF_PROG_TYPE_XDP: 1679 case BPF_PROG_TYPE_CGROUP_SKB: 1680 case BPF_PROG_TYPE_CGROUP_SOCK: 1681 case BPF_PROG_TYPE_LWT_IN: 1682 case BPF_PROG_TYPE_LWT_OUT: 1683 case BPF_PROG_TYPE_LWT_XMIT: 1684 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 1685 case BPF_PROG_TYPE_SOCK_OPS: 1686 case BPF_PROG_TYPE_SK_SKB: 1687 case BPF_PROG_TYPE_CGROUP_DEVICE: 1688 case BPF_PROG_TYPE_SK_MSG: 1689 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 1690 case BPF_PROG_TYPE_LIRC_MODE2: 1691 case BPF_PROG_TYPE_SK_REUSEPORT: 1692 case BPF_PROG_TYPE_FLOW_DISSECTOR: 1693 case BPF_PROG_TYPE_UNSPEC: 1694 case BPF_PROG_TYPE_TRACEPOINT: 1695 case 
BPF_PROG_TYPE_RAW_TRACEPOINT: 1696 case BPF_PROG_TYPE_PERF_EVENT: 1697 return false; 1698 case BPF_PROG_TYPE_KPROBE: 1699 default: 1700 return true; 1701 } 1702} 1703 1704static int bpf_object__validate(struct bpf_object *obj, bool needs_kver) 1705{ 1706 if (needs_kver && obj->kern_version == 0) { 1707 pr_warning("%s doesn't provide kernel version\n", 1708 obj->path); 1709 return -LIBBPF_ERRNO__KVERSION; 1710 } 1711 return 0; 1712} 1713 1714static struct bpf_object * 1715__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz, 1716 bool needs_kver, int flags) 1717{ 1718 struct bpf_object *obj; 1719 int err; 1720 1721 if (elf_version(EV_CURRENT) == EV_NONE) { 1722 pr_warning("failed to init libelf for %s\n", path); 1723 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); 1724 } 1725 1726 obj = bpf_object__new(path, obj_buf, obj_buf_sz); 1727 if (IS_ERR(obj)) 1728 return obj; 1729 1730 CHECK_ERR(bpf_object__elf_init(obj), err, out); 1731 CHECK_ERR(bpf_object__check_endianness(obj), err, out); 1732 CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out); 1733 CHECK_ERR(bpf_object__collect_reloc(obj), err, out); 1734 CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out); 1735 1736 bpf_object__elf_finish(obj); 1737 return obj; 1738out: 1739 bpf_object__close(obj); 1740 return ERR_PTR(err); 1741} 1742 1743struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr, 1744 int flags) 1745{ 1746 /* param validation */ 1747 if (!attr->file) 1748 return NULL; 1749 1750 pr_debug("loading %s\n", attr->file); 1751 1752 return __bpf_object__open(attr->file, NULL, 0, 1753 bpf_prog_type__needs_kver(attr->prog_type), 1754 flags); 1755} 1756 1757struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) 1758{ 1759 return __bpf_object__open_xattr(attr, 0); 1760} 1761 1762struct bpf_object *bpf_object__open(const char *path) 1763{ 1764 struct bpf_object_open_attr attr = { 1765 .file = path, 1766 .prog_type = BPF_PROG_TYPE_UNSPEC, 1767 }; 1768 
1769 return bpf_object__open_xattr(&attr); 1770} 1771 1772struct bpf_object *bpf_object__open_buffer(void *obj_buf, 1773 size_t obj_buf_sz, 1774 const char *name) 1775{ 1776 char tmp_name[64]; 1777 1778 /* param validation */ 1779 if (!obj_buf || obj_buf_sz <= 0) 1780 return NULL; 1781 1782 if (!name) { 1783 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", 1784 (unsigned long)obj_buf, 1785 (unsigned long)obj_buf_sz); 1786 tmp_name[sizeof(tmp_name) - 1] = '\0'; 1787 name = tmp_name; 1788 } 1789 pr_debug("loading object '%s' from buffer\n", 1790 name); 1791 1792 return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true); 1793} 1794 1795int bpf_object__unload(struct bpf_object *obj) 1796{ 1797 size_t i; 1798 1799 if (!obj) 1800 return -EINVAL; 1801 1802 for (i = 0; i < obj->nr_maps; i++) 1803 zclose(obj->maps[i].fd); 1804 1805 for (i = 0; i < obj->nr_programs; i++) 1806 bpf_program__unload(&obj->programs[i]); 1807 1808 return 0; 1809} 1810 1811int bpf_object__load(struct bpf_object *obj) 1812{ 1813 int err; 1814 1815 if (!obj) 1816 return -EINVAL; 1817 1818 if (obj->loaded) { 1819 pr_warning("object should not be loaded twice\n"); 1820 return -EINVAL; 1821 } 1822 1823 obj->loaded = true; 1824 1825 CHECK_ERR(bpf_object__probe_caps(obj), err, out); 1826 CHECK_ERR(bpf_object__create_maps(obj), err, out); 1827 CHECK_ERR(bpf_object__relocate(obj), err, out); 1828 CHECK_ERR(bpf_object__load_progs(obj), err, out); 1829 1830 return 0; 1831out: 1832 bpf_object__unload(obj); 1833 pr_warning("failed to load object '%s'\n", obj->path); 1834 return err; 1835} 1836 1837static int check_path(const char *path) 1838{ 1839 char *cp, errmsg[STRERR_BUFSIZE]; 1840 struct statfs st_fs; 1841 char *dname, *dir; 1842 int err = 0; 1843 1844 if (path == NULL) 1845 return -EINVAL; 1846 1847 dname = strdup(path); 1848 if (dname == NULL) 1849 return -ENOMEM; 1850 1851 dir = dirname(dname); 1852 if (statfs(dir, &st_fs)) { 1853 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1854 
pr_warning("failed to statfs %s: %s\n", dir, cp); 1855 err = -errno; 1856 } 1857 free(dname); 1858 1859 if (!err && st_fs.f_type != BPF_FS_MAGIC) { 1860 pr_warning("specified path %s is not on BPF FS\n", path); 1861 err = -EINVAL; 1862 } 1863 1864 return err; 1865} 1866 1867int bpf_program__pin_instance(struct bpf_program *prog, const char *path, 1868 int instance) 1869{ 1870 char *cp, errmsg[STRERR_BUFSIZE]; 1871 int err; 1872 1873 err = check_path(path); 1874 if (err) 1875 return err; 1876 1877 if (prog == NULL) { 1878 pr_warning("invalid program pointer\n"); 1879 return -EINVAL; 1880 } 1881 1882 if (instance < 0 || instance >= prog->instances.nr) { 1883 pr_warning("invalid prog instance %d of prog %s (max %d)\n", 1884 instance, prog->section_name, prog->instances.nr); 1885 return -EINVAL; 1886 } 1887 1888 if (bpf_obj_pin(prog->instances.fds[instance], path)) { 1889 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1890 pr_warning("failed to pin program: %s\n", cp); 1891 return -errno; 1892 } 1893 pr_debug("pinned program '%s'\n", path); 1894 1895 return 0; 1896} 1897 1898int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, 1899 int instance) 1900{ 1901 int err; 1902 1903 err = check_path(path); 1904 if (err) 1905 return err; 1906 1907 if (prog == NULL) { 1908 pr_warning("invalid program pointer\n"); 1909 return -EINVAL; 1910 } 1911 1912 if (instance < 0 || instance >= prog->instances.nr) { 1913 pr_warning("invalid prog instance %d of prog %s (max %d)\n", 1914 instance, prog->section_name, prog->instances.nr); 1915 return -EINVAL; 1916 } 1917 1918 err = unlink(path); 1919 if (err != 0) 1920 return -errno; 1921 pr_debug("unpinned program '%s'\n", path); 1922 1923 return 0; 1924} 1925 1926static int make_dir(const char *path) 1927{ 1928 char *cp, errmsg[STRERR_BUFSIZE]; 1929 int err = 0; 1930 1931 if (mkdir(path, 0700) && errno != EEXIST) 1932 err = -errno; 1933 1934 if (err) { 1935 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 
1936 pr_warning("failed to mkdir %s: %s\n", path, cp); 1937 } 1938 return err; 1939} 1940 1941int bpf_program__pin(struct bpf_program *prog, const char *path) 1942{ 1943 int i, err; 1944 1945 err = check_path(path); 1946 if (err) 1947 return err; 1948 1949 if (prog == NULL) { 1950 pr_warning("invalid program pointer\n"); 1951 return -EINVAL; 1952 } 1953 1954 if (prog->instances.nr <= 0) { 1955 pr_warning("no instances of prog %s to pin\n", 1956 prog->section_name); 1957 return -EINVAL; 1958 } 1959 1960 if (prog->instances.nr == 1) { 1961 /* don't create subdirs when pinning single instance */ 1962 return bpf_program__pin_instance(prog, path, 0); 1963 } 1964 1965 err = make_dir(path); 1966 if (err) 1967 return err; 1968 1969 for (i = 0; i < prog->instances.nr; i++) { 1970 char buf[PATH_MAX]; 1971 int len; 1972 1973 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 1974 if (len < 0) { 1975 err = -EINVAL; 1976 goto err_unpin; 1977 } else if (len >= PATH_MAX) { 1978 err = -ENAMETOOLONG; 1979 goto err_unpin; 1980 } 1981 1982 err = bpf_program__pin_instance(prog, buf, i); 1983 if (err) 1984 goto err_unpin; 1985 } 1986 1987 return 0; 1988 1989err_unpin: 1990 for (i = i - 1; i >= 0; i--) { 1991 char buf[PATH_MAX]; 1992 int len; 1993 1994 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 1995 if (len < 0) 1996 continue; 1997 else if (len >= PATH_MAX) 1998 continue; 1999 2000 bpf_program__unpin_instance(prog, buf, i); 2001 } 2002 2003 rmdir(path); 2004 2005 return err; 2006} 2007 2008int bpf_program__unpin(struct bpf_program *prog, const char *path) 2009{ 2010 int i, err; 2011 2012 err = check_path(path); 2013 if (err) 2014 return err; 2015 2016 if (prog == NULL) { 2017 pr_warning("invalid program pointer\n"); 2018 return -EINVAL; 2019 } 2020 2021 if (prog->instances.nr <= 0) { 2022 pr_warning("no instances of prog %s to pin\n", 2023 prog->section_name); 2024 return -EINVAL; 2025 } 2026 2027 if (prog->instances.nr == 1) { 2028 /* don't create subdirs when pinning single 
instance */ 2029 return bpf_program__unpin_instance(prog, path, 0); 2030 } 2031 2032 for (i = 0; i < prog->instances.nr; i++) { 2033 char buf[PATH_MAX]; 2034 int len; 2035 2036 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 2037 if (len < 0) 2038 return -EINVAL; 2039 else if (len >= PATH_MAX) 2040 return -ENAMETOOLONG; 2041 2042 err = bpf_program__unpin_instance(prog, buf, i); 2043 if (err) 2044 return err; 2045 } 2046 2047 err = rmdir(path); 2048 if (err) 2049 return -errno; 2050 2051 return 0; 2052} 2053 2054int bpf_map__pin(struct bpf_map *map, const char *path) 2055{ 2056 char *cp, errmsg[STRERR_BUFSIZE]; 2057 int err; 2058 2059 err = check_path(path); 2060 if (err) 2061 return err; 2062 2063 if (map == NULL) { 2064 pr_warning("invalid map pointer\n"); 2065 return -EINVAL; 2066 } 2067 2068 if (bpf_obj_pin(map->fd, path)) { 2069 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 2070 pr_warning("failed to pin map: %s\n", cp); 2071 return -errno; 2072 } 2073 2074 pr_debug("pinned map '%s'\n", path); 2075 2076 return 0; 2077} 2078 2079int bpf_map__unpin(struct bpf_map *map, const char *path) 2080{ 2081 int err; 2082 2083 err = check_path(path); 2084 if (err) 2085 return err; 2086 2087 if (map == NULL) { 2088 pr_warning("invalid map pointer\n"); 2089 return -EINVAL; 2090 } 2091 2092 err = unlink(path); 2093 if (err != 0) 2094 return -errno; 2095 pr_debug("unpinned map '%s'\n", path); 2096 2097 return 0; 2098} 2099 2100int bpf_object__pin_maps(struct bpf_object *obj, const char *path) 2101{ 2102 struct bpf_map *map; 2103 int err; 2104 2105 if (!obj) 2106 return -ENOENT; 2107 2108 if (!obj->loaded) { 2109 pr_warning("object not yet loaded; load it first\n"); 2110 return -ENOENT; 2111 } 2112 2113 err = make_dir(path); 2114 if (err) 2115 return err; 2116 2117 bpf_object__for_each_map(map, obj) { 2118 char buf[PATH_MAX]; 2119 int len; 2120 2121 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2122 bpf_map__name(map)); 2123 if (len < 0) { 2124 err = -EINVAL; 2125 goto 
err_unpin_maps; 2126 } else if (len >= PATH_MAX) { 2127 err = -ENAMETOOLONG; 2128 goto err_unpin_maps; 2129 } 2130 2131 err = bpf_map__pin(map, buf); 2132 if (err) 2133 goto err_unpin_maps; 2134 } 2135 2136 return 0; 2137 2138err_unpin_maps: 2139 while ((map = bpf_map__prev(map, obj))) { 2140 char buf[PATH_MAX]; 2141 int len; 2142 2143 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2144 bpf_map__name(map)); 2145 if (len < 0) 2146 continue; 2147 else if (len >= PATH_MAX) 2148 continue; 2149 2150 bpf_map__unpin(map, buf); 2151 } 2152 2153 return err; 2154} 2155 2156int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) 2157{ 2158 struct bpf_map *map; 2159 int err; 2160 2161 if (!obj) 2162 return -ENOENT; 2163 2164 bpf_object__for_each_map(map, obj) { 2165 char buf[PATH_MAX]; 2166 int len; 2167 2168 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2169 bpf_map__name(map)); 2170 if (len < 0) 2171 return -EINVAL; 2172 else if (len >= PATH_MAX) 2173 return -ENAMETOOLONG; 2174 2175 err = bpf_map__unpin(map, buf); 2176 if (err) 2177 return err; 2178 } 2179 2180 return 0; 2181} 2182 2183int bpf_object__pin_programs(struct bpf_object *obj, const char *path) 2184{ 2185 struct bpf_program *prog; 2186 int err; 2187 2188 if (!obj) 2189 return -ENOENT; 2190 2191 if (!obj->loaded) { 2192 pr_warning("object not yet loaded; load it first\n"); 2193 return -ENOENT; 2194 } 2195 2196 err = make_dir(path); 2197 if (err) 2198 return err; 2199 2200 bpf_object__for_each_program(prog, obj) { 2201 char buf[PATH_MAX]; 2202 int len; 2203 2204 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2205 prog->pin_name); 2206 if (len < 0) { 2207 err = -EINVAL; 2208 goto err_unpin_programs; 2209 } else if (len >= PATH_MAX) { 2210 err = -ENAMETOOLONG; 2211 goto err_unpin_programs; 2212 } 2213 2214 err = bpf_program__pin(prog, buf); 2215 if (err) 2216 goto err_unpin_programs; 2217 } 2218 2219 return 0; 2220 2221err_unpin_programs: 2222 while ((prog = bpf_program__prev(prog, obj))) { 2223 char 
buf[PATH_MAX]; 2224 int len; 2225 2226 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2227 prog->pin_name); 2228 if (len < 0) 2229 continue; 2230 else if (len >= PATH_MAX) 2231 continue; 2232 2233 bpf_program__unpin(prog, buf); 2234 } 2235 2236 return err; 2237} 2238 2239int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) 2240{ 2241 struct bpf_program *prog; 2242 int err; 2243 2244 if (!obj) 2245 return -ENOENT; 2246 2247 bpf_object__for_each_program(prog, obj) { 2248 char buf[PATH_MAX]; 2249 int len; 2250 2251 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2252 prog->pin_name); 2253 if (len < 0) 2254 return -EINVAL; 2255 else if (len >= PATH_MAX) 2256 return -ENAMETOOLONG; 2257 2258 err = bpf_program__unpin(prog, buf); 2259 if (err) 2260 return err; 2261 } 2262 2263 return 0; 2264} 2265 2266int bpf_object__pin(struct bpf_object *obj, const char *path) 2267{ 2268 int err; 2269 2270 err = bpf_object__pin_maps(obj, path); 2271 if (err) 2272 return err; 2273 2274 err = bpf_object__pin_programs(obj, path); 2275 if (err) { 2276 bpf_object__unpin_maps(obj, path); 2277 return err; 2278 } 2279 2280 return 0; 2281} 2282 2283void bpf_object__close(struct bpf_object *obj) 2284{ 2285 size_t i; 2286 2287 if (!obj) 2288 return; 2289 2290 if (obj->clear_priv) 2291 obj->clear_priv(obj, obj->priv); 2292 2293 bpf_object__elf_finish(obj); 2294 bpf_object__unload(obj); 2295 btf__free(obj->btf); 2296 btf_ext__free(obj->btf_ext); 2297 2298 for (i = 0; i < obj->nr_maps; i++) { 2299 zfree(&obj->maps[i].name); 2300 if (obj->maps[i].clear_priv) 2301 obj->maps[i].clear_priv(&obj->maps[i], 2302 obj->maps[i].priv); 2303 obj->maps[i].priv = NULL; 2304 obj->maps[i].clear_priv = NULL; 2305 } 2306 zfree(&obj->maps); 2307 obj->nr_maps = 0; 2308 2309 if (obj->programs && obj->nr_programs) { 2310 for (i = 0; i < obj->nr_programs; i++) 2311 bpf_program__exit(&obj->programs[i]); 2312 } 2313 zfree(&obj->programs); 2314 2315 list_del(&obj->list); 2316 free(obj); 2317} 2318 2319struct 
bpf_object * 2320bpf_object__next(struct bpf_object *prev) 2321{ 2322 struct bpf_object *next; 2323 2324 if (!prev) 2325 next = list_first_entry(&bpf_objects_list, 2326 struct bpf_object, 2327 list); 2328 else 2329 next = list_next_entry(prev, list); 2330 2331 /* Empty list is noticed here so don't need checking on entry. */ 2332 if (&next->list == &bpf_objects_list) 2333 return NULL; 2334 2335 return next; 2336} 2337 2338const char *bpf_object__name(struct bpf_object *obj) 2339{ 2340 return obj ? obj->path : ERR_PTR(-EINVAL); 2341} 2342 2343unsigned int bpf_object__kversion(struct bpf_object *obj) 2344{ 2345 return obj ? obj->kern_version : 0; 2346} 2347 2348struct btf *bpf_object__btf(struct bpf_object *obj) 2349{ 2350 return obj ? obj->btf : NULL; 2351} 2352 2353int bpf_object__btf_fd(const struct bpf_object *obj) 2354{ 2355 return obj->btf ? btf__fd(obj->btf) : -1; 2356} 2357 2358int bpf_object__set_priv(struct bpf_object *obj, void *priv, 2359 bpf_object_clear_priv_t clear_priv) 2360{ 2361 if (obj->priv && obj->clear_priv) 2362 obj->clear_priv(obj, obj->priv); 2363 2364 obj->priv = priv; 2365 obj->clear_priv = clear_priv; 2366 return 0; 2367} 2368 2369void *bpf_object__priv(struct bpf_object *obj) 2370{ 2371 return obj ? obj->priv : ERR_PTR(-EINVAL); 2372} 2373 2374static struct bpf_program * 2375__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward) 2376{ 2377 size_t nr_programs = obj->nr_programs; 2378 ssize_t idx; 2379 2380 if (!nr_programs) 2381 return NULL; 2382 2383 if (!p) 2384 /* Iter from the beginning */ 2385 return forward ? &obj->programs[0] : 2386 &obj->programs[nr_programs - 1]; 2387 2388 if (p->obj != obj) { 2389 pr_warning("error: program handler doesn't match object\n"); 2390 return NULL; 2391 } 2392 2393 idx = (p - obj->programs) + (forward ? 
1 : -1); 2394 if (idx >= obj->nr_programs || idx < 0) 2395 return NULL; 2396 return &obj->programs[idx]; 2397} 2398 2399struct bpf_program * 2400bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) 2401{ 2402 struct bpf_program *prog = prev; 2403 2404 do { 2405 prog = __bpf_program__iter(prog, obj, true); 2406 } while (prog && bpf_program__is_function_storage(prog, obj)); 2407 2408 return prog; 2409} 2410 2411struct bpf_program * 2412bpf_program__prev(struct bpf_program *next, struct bpf_object *obj) 2413{ 2414 struct bpf_program *prog = next; 2415 2416 do { 2417 prog = __bpf_program__iter(prog, obj, false); 2418 } while (prog && bpf_program__is_function_storage(prog, obj)); 2419 2420 return prog; 2421} 2422 2423int bpf_program__set_priv(struct bpf_program *prog, void *priv, 2424 bpf_program_clear_priv_t clear_priv) 2425{ 2426 if (prog->priv && prog->clear_priv) 2427 prog->clear_priv(prog, prog->priv); 2428 2429 prog->priv = priv; 2430 prog->clear_priv = clear_priv; 2431 return 0; 2432} 2433 2434void *bpf_program__priv(struct bpf_program *prog) 2435{ 2436 return prog ? 
prog->priv : ERR_PTR(-EINVAL); 2437} 2438 2439void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) 2440{ 2441 prog->prog_ifindex = ifindex; 2442} 2443 2444const char *bpf_program__title(struct bpf_program *prog, bool needs_copy) 2445{ 2446 const char *title; 2447 2448 title = prog->section_name; 2449 if (needs_copy) { 2450 title = strdup(title); 2451 if (!title) { 2452 pr_warning("failed to strdup program title\n"); 2453 return ERR_PTR(-ENOMEM); 2454 } 2455 } 2456 2457 return title; 2458} 2459 2460int bpf_program__fd(struct bpf_program *prog) 2461{ 2462 return bpf_program__nth_fd(prog, 0); 2463} 2464 2465int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, 2466 bpf_program_prep_t prep) 2467{ 2468 int *instances_fds; 2469 2470 if (nr_instances <= 0 || !prep) 2471 return -EINVAL; 2472 2473 if (prog->instances.nr > 0 || prog->instances.fds) { 2474 pr_warning("Can't set pre-processor after loading\n"); 2475 return -EINVAL; 2476 } 2477 2478 instances_fds = malloc(sizeof(int) * nr_instances); 2479 if (!instances_fds) { 2480 pr_warning("alloc memory failed for fds\n"); 2481 return -ENOMEM; 2482 } 2483 2484 /* fill all fd with -1 */ 2485 memset(instances_fds, -1, sizeof(int) * nr_instances); 2486 2487 prog->instances.nr = nr_instances; 2488 prog->instances.fds = instances_fds; 2489 prog->preprocessor = prep; 2490 return 0; 2491} 2492 2493int bpf_program__nth_fd(struct bpf_program *prog, int n) 2494{ 2495 int fd; 2496 2497 if (!prog) 2498 return -EINVAL; 2499 2500 if (n >= prog->instances.nr || n < 0) { 2501 pr_warning("Can't get the %dth fd from program %s: only %d instances\n", 2502 n, prog->section_name, prog->instances.nr); 2503 return -EINVAL; 2504 } 2505 2506 fd = prog->instances.fds[n]; 2507 if (fd < 0) { 2508 pr_warning("%dth instance of program '%s' is invalid\n", 2509 n, prog->section_name); 2510 return -ENOENT; 2511 } 2512 2513 return fd; 2514} 2515 2516void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type 
type) 2517{ 2518 prog->type = type; 2519} 2520 2521static bool bpf_program__is_type(struct bpf_program *prog, 2522 enum bpf_prog_type type) 2523{ 2524 return prog ? (prog->type == type) : false; 2525} 2526 2527#define BPF_PROG_TYPE_FNS(NAME, TYPE) \ 2528int bpf_program__set_##NAME(struct bpf_program *prog) \ 2529{ \ 2530 if (!prog) \ 2531 return -EINVAL; \ 2532 bpf_program__set_type(prog, TYPE); \ 2533 return 0; \ 2534} \ 2535 \ 2536bool bpf_program__is_##NAME(struct bpf_program *prog) \ 2537{ \ 2538 return bpf_program__is_type(prog, TYPE); \ 2539} \ 2540 2541BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER); 2542BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE); 2543BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS); 2544BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT); 2545BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT); 2546BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT); 2547BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP); 2548BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); 2549 2550void bpf_program__set_expected_attach_type(struct bpf_program *prog, 2551 enum bpf_attach_type type) 2552{ 2553 prog->expected_attach_type = type; 2554} 2555 2556#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \ 2557 { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype } 2558 2559/* Programs that can NOT be attached. */ 2560#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0) 2561 2562/* Programs that can be attached. */ 2563#define BPF_APROG_SEC(string, ptype, atype) \ 2564 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype) 2565 2566/* Programs that must specify expected attach type at load time. */ 2567#define BPF_EAPROG_SEC(string, ptype, eatype) \ 2568 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype) 2569 2570/* Programs that can be attached but attach type can't be identified by section 2571 * name. Kept for backward compatibility. 
2572 */ 2573#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype) 2574 2575static const struct { 2576 const char *sec; 2577 size_t len; 2578 enum bpf_prog_type prog_type; 2579 enum bpf_attach_type expected_attach_type; 2580 int is_attachable; 2581 enum bpf_attach_type attach_type; 2582} section_names[] = { 2583 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), 2584 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE), 2585 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE), 2586 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS), 2587 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT), 2588 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT), 2589 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT), 2590 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), 2591 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), 2592 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), 2593 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), 2594 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), 2595 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), 2596 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB, 2597 BPF_CGROUP_INET_INGRESS), 2598 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, 2599 BPF_CGROUP_INET_EGRESS), 2600 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), 2601 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, 2602 BPF_CGROUP_INET_SOCK_CREATE), 2603 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, 2604 BPF_CGROUP_INET4_POST_BIND), 2605 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK, 2606 BPF_CGROUP_INET6_POST_BIND), 2607 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE, 2608 BPF_CGROUP_DEVICE), 2609 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS, 2610 BPF_CGROUP_SOCK_OPS), 2611 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB, 2612 BPF_SK_SKB_STREAM_PARSER), 2613 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB, 2614 BPF_SK_SKB_STREAM_VERDICT), 2615 
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT

/* Upper bound on one formatted " <sec-name>" item in the buffer below. */
#define MAX_TYPE_NAME_SIZE 32

/* Build a heap-allocated, space-separated list of known section names for
 * error messages; when @attach_type is set, only attachable ones are listed.
 * Caller frees. Returns NULL on allocation failure or overflow.
 */
static char *libbpf_get_type_names(bool attach_type)
{
	int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
	char *buf;

	buf = malloc(len);
	if (!buf)
		return NULL;

	buf[0] = '\0';
	/* Forge string buf with all available names */
	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (attach_type && !section_names[i].is_attachable)
			continue;

		if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
			free(buf);
			return NULL;
		}
		strcat(buf, " ");
		strcat(buf, section_names[i].sec);
	}

	return buf;
}

/* Guess program type and expected attach type from an ELF section name by
 * prefix-matching against section_names[]. Returns 0 on match, -EINVAL
 * otherwise (after logging the supported names).
 */
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			     enum bpf_attach_type *expected_attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		*prog_type = section_names[i].prog_type;
		*expected_attach_type = section_names[i].expected_attach_type;
		return 0;
	}
	pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(false);
	if (type_names != NULL) {
		pr_info("supported section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -EINVAL;
}

/* Guess the attach type from an ELF section name; only entries marked
 * is_attachable can succeed. Returns 0 on match, -EINVAL otherwise.
 */
int libbpf_attach_type_by_name(const char *name,
			       enum bpf_attach_type *attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		if (!section_names[i].is_attachable)
			return -EINVAL;
		*attach_type = section_names[i].attach_type;
		return 0;
	}
	pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(true);
	if (type_names != NULL) {
		pr_info("attachable section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -EINVAL;
}

/* Thin wrapper: classify @prog by its stored ELF section name. */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}

int bpf_map__fd(struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(struct bpf_map *map)
{
	return map ? map->name : NULL;
}

/* BTF type id of the map key; 0 when unknown or @map is NULL. */
__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
{
	return map ?
	       map->btf_key_type_id : 0;
}

/* BTF type id of the map value; 0 when unknown or @map is NULL. */
__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
{
	return map ? map->btf_value_type_id : 0;
}

/* Attach opaque user data to @map; any previous private data is released
 * via its clear_priv callback first.
 */
int bpf_map__set_priv(struct bpf_map *map, void *priv,
		     bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

void *bpf_map__priv(struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}

/* Perf-event arrays stay on the host even when programs are offloaded. */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}

/* Set the inner map fd for a map-in-map; may only be done once. */
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warning("error: unsupported map type\n");
		return -EINVAL;
	}
	if (map->inner_map_fd != -1) {
		pr_warning("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}

/* Step @i elements from @m inside obj->maps[]; NULL when @m is not part of
 * @obj or the step leaves the array.
 */
static struct bpf_map *
__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
{
	ssize_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if ((m < s) || (m >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			   __func__);
		return NULL;
	}

	idx = (m - obj->maps) + i;
	if (idx >= obj->nr_maps || idx < 0)
		return NULL;
	return &obj->maps[idx];
}

/* Forward iterator over obj's maps; prev == NULL starts at the first map. */
struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
	if (prev == NULL)
		return obj->maps;

	return __bpf_map__iter(prev, obj, 1);
}

struct bpf_map *
bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
{
	/* next == NULL starts the reverse iteration at the last map. */
	if (next == NULL) {
		if (!obj->nr_maps)
			return NULL;
		return obj->maps + obj->nr_maps - 1;
	}

	return __bpf_map__iter(next, obj, -1);
}

/* Linear search of @obj's maps by name; NULL when not found. */
struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_object__for_each_map(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}

int
bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
{
	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}

/* Look a map up by its ELF maps-section offset; ERR_PTR(-ENOENT) if none. */
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	int i;

	for (i = 0; i < obj->nr_maps; i++) {
		if (obj->maps[i].offset == offset)
			return &obj->maps[i];
	}
	return ERR_PTR(-ENOENT);
}

/* Extract the negative errno encoded in an ERR_PTR-style pointer; 0 if the
 * pointer is a valid (non-error) value.
 */
long libbpf_get_error(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}

/* Convenience wrapper around bpf_prog_load_xattr() with only a file and a
 * (possibly BPF_PROG_TYPE_UNSPEC) program type.
 */
int bpf_prog_load(const char *file, enum bpf_prog_type type,
		  struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_prog_load_attr attr;

	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
	attr.file = file;
	attr.prog_type = type;
	attr.expected_attach_type = 0;

	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}

/* Open an object file, classify each program (guessing the type from the
 * section name when unspecified), propagate the offload ifindex, load the
 * object, and hand back the object plus the fd of its first program.
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {
		.file = attr->file,
		.prog_type = attr->prog_type,
	};
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type prog_type;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	obj =
bpf_object__open_xattr(&open_attr);
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		/*
		 * If type is not specified, try to guess it based on
		 * section name.
		 */
		prog_type = attr->prog_type;
		prog->prog_ifindex = attr->ifindex;
		expected_attach_type = attr->expected_attach_type;
		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
			err = bpf_program__identify_section(prog, &prog_type,
							    &expected_attach_type);
			if (err < 0) {
				bpf_object__close(obj);
				return -EINVAL;
			}
		}

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog,
						      expected_attach_type);

		if (!first_prog)
			first_prog = prog;
	}

	/* Offload-neutral maps (perf event arrays) keep ifindex 0. */
	bpf_object__for_each_map(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warning("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}

/* Drain a perf-event mmap ring: invoke @fn for each record between
 * data_tail and data_head, bouncing records that wrap around the ring end
 * through the caller-owned (*copy_mem, *copy_size) buffer, then publish the
 * new tail. Stops early when @fn returns something other than
 * LIBBPF_PERF_EVENT_CONT.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the ring: reassemble the two
		 * pieces into the bounce buffer, growing it if needed.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t
len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return ret;
}

/* Describes one variable-length array inside struct bpf_prog_info: where its
 * pointer, element count, and (fixed or per-kernel) record size live.
 */
struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fix size of -size_offset
				 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};

/* Read the __u32 field of *info at byte @offset; a negative offset encodes a
 * fixed record size of -offset rather than a field to read.
 */
static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

/* __u64 counterpart of bpf_prog_info_read_offset_u32(). */
static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

/* Store @val into the __u32 field at @offset; no-op for negative (fixed-size)
 * offsets.
 */
static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

/* Fetch bpf_prog_info for @fd together with the arrays selected by the
 * @arrays bitmask, all placed in one contiguous heap allocation so the
 * result can be copied or serialized as a unit. Returns ERR_PTR on failure;
 * caller frees the result.
 */
struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate continuous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	/* Point each requested array at its slice of the flat data buffer and
	 * record the counts/sizes the kernel should fill.
	 */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}

/* Convert every absolute array pointer inside @info_linear into an offset
 * relative to info_linear->data, so the blob can be relocated/serialized.
 */
void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear) 3242{ 3243 int i; 3244 3245 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { 3246 struct bpf_prog_info_array_desc *desc; 3247 __u64 addr, offs; 3248 3249 if ((info_linear->arrays & (1UL << i)) == 0) 3250 continue; 3251 3252 desc = bpf_prog_info_array_desc + i; 3253 offs = bpf_prog_info_read_offset_u64(&info_linear->info, 3254 desc->array_offset); 3255 addr = offs + ptr_to_u64(info_linear->data); 3256 bpf_prog_info_set_offset_u64(&info_linear->info, 3257 desc->array_offset, addr); 3258 } 3259}