Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'libbpf: split BTF support'

Andrii Nakryiko says:

====================
This patch set adds support for generating and deduplicating split BTF. This
is an enhancement to the BTF, which allows one to designate one BTF as the "base
BTF" (e.g., vmlinux BTF), and one or more other BTFs as "split BTF" (e.g.,
kernel module BTF), which are building upon and extending base BTF with extra
types and strings.

Once loaded, split BTF appears as a single unified BTF superset of base BTF,
with a continuous and transparent numbering scheme. This allows all the existing
users of BTF to work correctly and stay agnostic to the base/split BTFs
composition. The only difference is in how to instantiate split BTF: it
requires base BTF to be already instantiated and passed to btf__new_xxx_split()
or btf__parse_xxx_split() "constructors" explicitly.

This split approach is necessary if we are to have reasonably-sized kernel
module BTFs. When each kernel module's BTF is deduplicated individually (with no
shared base BTF), the resulting module BTFs contain copies of many kernel types already present
in vmlinux BTF. Even those single copies result in a big BTF size bloat. On my
kernel configuration with 700 modules built, non-split BTF approach results in
115MBs of BTFs across all modules. With split BTF deduplication approach,
total size is down to 5.2MBs, which is on par with vmlinux BTF (at
around 4MBs). This seems reasonable and practical. As to why we'd need kernel
module BTFs, that should be pretty obvious to anyone using BPF at this point,
as it allows all the BTF-powered features to be used with kernel modules:
tp_btf, fentry/fexit/fmod_ret, lsm, bpf_iter, etc.

This patch set is a pre-requisite to adding split BTF support to pahole, which
is a prerequisite to integrating split BTF into the Linux kernel build setup
to generate BTF for kernel modules. The latter will come as a follow-up patch
series once this series makes it into libbpf and pahole makes use of it.

Patch #4 introduces necessary basic support for split BTF into libbpf APIs.
Patch #8 implements minimal changes to BTF dedup algorithm to allow
deduplicating split BTFs. Patch #11 adds an extra -B flag to bpftool to allow
specifying the path to base BTF for cases when one wants to dump or inspect split
BTF. All the rest are refactorings, clean ups, bug fixes and selftests.

v1->v2:
- addressed Song's feedback.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>

+1293 -356
+6 -3
tools/bpf/bpftool/btf.c
··· 358 358 } 359 359 } else { 360 360 int cnt = btf__get_nr_types(btf); 361 + int start_id = 1; 361 362 362 - for (i = 1; i <= cnt; i++) { 363 + if (base_btf) 364 + start_id = btf__get_nr_types(base_btf) + 1; 365 + 366 + for (i = start_id; i <= cnt; i++) { 363 367 t = btf__type_by_id(btf, i); 364 368 dump_btf_type(btf, i, t); 365 369 } ··· 442 438 return -1; 443 439 } 444 440 src = GET_ARG(); 445 - 446 441 if (is_prefix(src, "map")) { 447 442 struct bpf_map_info info = {}; 448 443 __u32 len = sizeof(info); ··· 502 499 } 503 500 NEXT_ARG(); 504 501 } else if (is_prefix(src, "file")) { 505 - btf = btf__parse(*argv, NULL); 502 + btf = btf__parse_split(*argv, base_btf); 506 503 if (IS_ERR(btf)) { 507 504 err = -PTR_ERR(btf); 508 505 btf = NULL;
+14 -1
tools/bpf/bpftool/main.c
··· 11 11 12 12 #include <bpf/bpf.h> 13 13 #include <bpf/libbpf.h> 14 + #include <bpf/btf.h> 14 15 15 16 #include "main.h" 16 17 ··· 29 28 bool block_mount; 30 29 bool verifier_logs; 31 30 bool relaxed_maps; 31 + struct btf *base_btf; 32 32 struct pinned_obj_table prog_table; 33 33 struct pinned_obj_table map_table; 34 34 struct pinned_obj_table link_table; ··· 393 391 { "mapcompat", no_argument, NULL, 'm' }, 394 392 { "nomount", no_argument, NULL, 'n' }, 395 393 { "debug", no_argument, NULL, 'd' }, 394 + { "base-btf", required_argument, NULL, 'B' }, 396 395 { 0 } 397 396 }; 398 397 int opt, ret; ··· 410 407 hash_init(link_table.table); 411 408 412 409 opterr = 0; 413 - while ((opt = getopt_long(argc, argv, "Vhpjfmnd", 410 + while ((opt = getopt_long(argc, argv, "VhpjfmndB:", 414 411 options, NULL)) >= 0) { 415 412 switch (opt) { 416 413 case 'V': ··· 444 441 libbpf_set_print(print_all_levels); 445 442 verifier_logs = true; 446 443 break; 444 + case 'B': 445 + base_btf = btf__parse(optarg, NULL); 446 + if (libbpf_get_error(base_btf)) { 447 + p_err("failed to parse base BTF at '%s': %ld\n", 448 + optarg, libbpf_get_error(base_btf)); 449 + base_btf = NULL; 450 + return -1; 451 + } 452 + break; 447 453 default: 448 454 p_err("unrecognized option '%s'", argv[optind - 1]); 449 455 if (json_output) ··· 477 465 delete_pinned_obj_table(&map_table); 478 466 delete_pinned_obj_table(&link_table); 479 467 } 468 + btf__free(base_btf); 480 469 481 470 return ret; 482 471 }
+1
tools/bpf/bpftool/main.h
··· 90 90 extern bool block_mount; 91 91 extern bool verifier_logs; 92 92 extern bool relaxed_maps; 93 + extern struct btf *base_btf; 93 94 extern struct pinned_obj_table prog_table; 94 95 extern struct pinned_obj_table map_table; 95 96 extern struct pinned_obj_table link_table;
+473 -336
tools/lib/bpf/btf.c
··· 78 78 void *types_data; 79 79 size_t types_data_cap; /* used size stored in hdr->type_len */ 80 80 81 - /* type ID to `struct btf_type *` lookup index */ 81 + /* type ID to `struct btf_type *` lookup index 82 + * type_offs[0] corresponds to the first non-VOID type: 83 + * - for base BTF it's type [1]; 84 + * - for split BTF it's the first non-base BTF type. 85 + */ 82 86 __u32 *type_offs; 83 87 size_t type_offs_cap; 88 + /* number of types in this BTF instance: 89 + * - doesn't include special [0] void type; 90 + * - for split BTF counts number of types added on top of base BTF. 91 + */ 84 92 __u32 nr_types; 93 + /* if not NULL, points to the base BTF on top of which the current 94 + * split BTF is based 95 + */ 96 + struct btf *base_btf; 97 + /* BTF type ID of the first type in this BTF instance: 98 + * - for base BTF it's equal to 1; 99 + * - for split BTF it's equal to biggest type ID of base BTF plus 1. 100 + */ 101 + int start_id; 102 + /* logical string offset of this BTF instance: 103 + * - for base BTF it's equal to 0; 104 + * - for split BTF it's equal to total size of base BTF's string section size. 105 + */ 106 + int start_str_off; 85 107 86 108 void *strs_data; 87 109 size_t strs_data_cap; /* used size stored in hdr->str_len */ ··· 112 90 struct hashmap *strs_hash; 113 91 /* whether strings are already deduplicated */ 114 92 bool strs_deduped; 93 + /* extra indirection layer to make strings hashmap work with stable 94 + * string offsets and ability to transparently choose between 95 + * btf->strs_data or btf_dedup->strs_data as a source of strings. 96 + * This is used for BTF strings dedup to transfer deduplicated strings 97 + * data back to struct btf without re-building strings index. 
98 + */ 99 + void **strs_data_ptr; 100 + 115 101 /* BTF object FD, if loaded into kernel */ 116 102 int fd; 117 103 ··· 198 168 __u32 *p; 199 169 200 170 p = btf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32), 201 - btf->nr_types + 1, BTF_MAX_NR_TYPES, 1); 171 + btf->nr_types, BTF_MAX_NR_TYPES, 1); 202 172 if (!p) 203 173 return -ENOMEM; 204 174 ··· 245 215 return -EINVAL; 246 216 } 247 217 248 - if (meta_left < hdr->type_off) { 249 - pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off); 218 + if (meta_left < hdr->str_off + hdr->str_len) { 219 + pr_debug("Invalid BTF total size:%u\n", btf->raw_size); 250 220 return -EINVAL; 251 221 } 252 222 253 - if (meta_left < hdr->str_off) { 254 - pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off); 223 + if (hdr->type_off + hdr->type_len > hdr->str_off) { 224 + pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n", 225 + hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len); 255 226 return -EINVAL; 256 227 } 257 228 258 - if (hdr->type_off >= hdr->str_off) { 259 - pr_debug("BTF type section offset >= string section offset. 
No type?\n"); 260 - return -EINVAL; 261 - } 262 - 263 - if (hdr->type_off & 0x02) { 229 + if (hdr->type_off % 4) { 264 230 pr_debug("BTF type section is not aligned to 4 bytes\n"); 265 231 return -EINVAL; 266 232 } ··· 270 244 const char *start = btf->strs_data; 271 245 const char *end = start + btf->hdr->str_len; 272 246 273 - if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || 274 - start[0] || end[-1]) { 247 + if (btf->base_btf && hdr->str_len == 0) 248 + return 0; 249 + if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) { 275 250 pr_debug("Invalid BTF string section\n"); 276 251 return -EINVAL; 277 252 } 278 - 253 + if (!btf->base_btf && start[0]) { 254 + pr_debug("Invalid BTF string section\n"); 255 + return -EINVAL; 256 + } 279 257 return 0; 280 258 } 281 259 ··· 394 364 struct btf_header *hdr = btf->hdr; 395 365 void *next_type = btf->types_data; 396 366 void *end_type = next_type + hdr->type_len; 397 - int err, i = 0, type_size; 398 - 399 - /* VOID (type_id == 0) is specially handled by btf__get_type_by_id(), 400 - * so ensure we can never properly use its offset from index by 401 - * setting it to a large value 402 - */ 403 - err = btf_add_type_idx_entry(btf, UINT_MAX); 404 - if (err) 405 - return err; 367 + int err, type_size; 406 368 407 369 while (next_type + sizeof(struct btf_type) <= end_type) { 408 - i++; 409 - 410 370 if (btf->swapped_endian) 411 371 btf_bswap_type_base(next_type); 412 372 ··· 404 384 if (type_size < 0) 405 385 return type_size; 406 386 if (next_type + type_size > end_type) { 407 - pr_warn("BTF type [%d] is malformed\n", i); 387 + pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types); 408 388 return -EINVAL; 409 389 } 410 390 ··· 429 409 430 410 __u32 btf__get_nr_types(const struct btf *btf) 431 411 { 432 - return btf->nr_types; 412 + return btf->start_id + btf->nr_types - 1; 433 413 } 434 414 435 415 /* internal helper returning non-const pointer to a type */ ··· 437 417 { 438 418 if 
(type_id == 0) 439 419 return &btf_void; 440 - 441 - return btf->types_data + btf->type_offs[type_id]; 420 + if (type_id < btf->start_id) 421 + return btf_type_by_id(btf->base_btf, type_id); 422 + return btf->types_data + btf->type_offs[type_id - btf->start_id]; 442 423 } 443 424 444 425 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) 445 426 { 446 - if (type_id > btf->nr_types) 427 + if (type_id >= btf->start_id + btf->nr_types) 447 428 return NULL; 448 429 return btf_type_by_id((struct btf *)btf, type_id); 449 430 } ··· 453 432 { 454 433 const struct btf_type *t; 455 434 const char *name; 456 - int i; 435 + int i, n; 457 436 458 - for (i = 1; i <= btf->nr_types; i++) { 437 + if (btf->base_btf && btf->base_btf->ptr_sz > 0) 438 + return btf->base_btf->ptr_sz; 439 + 440 + n = btf__get_nr_types(btf); 441 + for (i = 1; i <= n; i++) { 459 442 t = btf__type_by_id(btf, i); 460 443 if (!btf_is_int(t)) 461 444 continue; ··· 742 717 free(btf); 743 718 } 744 719 745 - struct btf *btf__new_empty(void) 720 + static struct btf *btf_new_empty(struct btf *base_btf) 746 721 { 747 722 struct btf *btf; 748 723 ··· 750 725 if (!btf) 751 726 return ERR_PTR(-ENOMEM); 752 727 728 + btf->nr_types = 0; 729 + btf->start_id = 1; 730 + btf->start_str_off = 0; 753 731 btf->fd = -1; 754 732 btf->ptr_sz = sizeof(void *); 755 733 btf->swapped_endian = false; 756 734 735 + if (base_btf) { 736 + btf->base_btf = base_btf; 737 + btf->start_id = btf__get_nr_types(base_btf) + 1; 738 + btf->start_str_off = base_btf->hdr->str_len; 739 + } 740 + 757 741 /* +1 for empty string at offset 0 */ 758 - btf->raw_size = sizeof(struct btf_header) + 1; 742 + btf->raw_size = sizeof(struct btf_header) + (base_btf ? 
0 : 1); 759 743 btf->raw_data = calloc(1, btf->raw_size); 760 744 if (!btf->raw_data) { 761 745 free(btf); ··· 778 744 779 745 btf->types_data = btf->raw_data + btf->hdr->hdr_len; 780 746 btf->strs_data = btf->raw_data + btf->hdr->hdr_len; 781 - btf->hdr->str_len = 1; /* empty string at offset 0 */ 747 + btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */ 782 748 783 749 return btf; 784 750 } 785 751 786 - struct btf *btf__new(const void *data, __u32 size) 752 + struct btf *btf__new_empty(void) 753 + { 754 + return btf_new_empty(NULL); 755 + } 756 + 757 + struct btf *btf__new_empty_split(struct btf *base_btf) 758 + { 759 + return btf_new_empty(base_btf); 760 + } 761 + 762 + static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf) 787 763 { 788 764 struct btf *btf; 789 765 int err; ··· 801 757 btf = calloc(1, sizeof(struct btf)); 802 758 if (!btf) 803 759 return ERR_PTR(-ENOMEM); 760 + 761 + btf->nr_types = 0; 762 + btf->start_id = 1; 763 + btf->start_str_off = 0; 764 + 765 + if (base_btf) { 766 + btf->base_btf = base_btf; 767 + btf->start_id = btf__get_nr_types(base_btf) + 1; 768 + btf->start_str_off = base_btf->hdr->str_len; 769 + } 804 770 805 771 btf->raw_data = malloc(size); 806 772 if (!btf->raw_data) { ··· 844 790 return btf; 845 791 } 846 792 847 - struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) 793 + struct btf *btf__new(const void *data, __u32 size) 794 + { 795 + return btf_new(data, size, NULL); 796 + } 797 + 798 + static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, 799 + struct btf_ext **btf_ext) 848 800 { 849 801 Elf_Data *btf_data = NULL, *btf_ext_data = NULL; 850 802 int err = 0, fd = -1, idx = 0; ··· 928 868 err = -ENOENT; 929 869 goto done; 930 870 } 931 - btf = btf__new(btf_data->d_buf, btf_data->d_size); 871 + btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf); 932 872 if (IS_ERR(btf)) 933 873 goto done; 934 874 ··· 973 913 return btf; 974 914 } 975 915 976 
- struct btf *btf__parse_raw(const char *path) 916 + struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) 917 + { 918 + return btf_parse_elf(path, NULL, btf_ext); 919 + } 920 + 921 + struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf) 922 + { 923 + return btf_parse_elf(path, base_btf, NULL); 924 + } 925 + 926 + static struct btf *btf_parse_raw(const char *path, struct btf *base_btf) 977 927 { 978 928 struct btf *btf = NULL; 979 929 void *data = NULL; ··· 1037 967 } 1038 968 1039 969 /* finally parse BTF data */ 1040 - btf = btf__new(data, sz); 970 + btf = btf_new(data, sz, base_btf); 1041 971 1042 972 err_out: 1043 973 free(data); ··· 1046 976 return err ? ERR_PTR(err) : btf; 1047 977 } 1048 978 1049 - struct btf *btf__parse(const char *path, struct btf_ext **btf_ext) 979 + struct btf *btf__parse_raw(const char *path) 980 + { 981 + return btf_parse_raw(path, NULL); 982 + } 983 + 984 + struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf) 985 + { 986 + return btf_parse_raw(path, base_btf); 987 + } 988 + 989 + static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext) 1050 990 { 1051 991 struct btf *btf; 1052 992 1053 993 if (btf_ext) 1054 994 *btf_ext = NULL; 1055 995 1056 - btf = btf__parse_raw(path); 996 + btf = btf_parse_raw(path, base_btf); 1057 997 if (!IS_ERR(btf) || PTR_ERR(btf) != -EPROTO) 1058 998 return btf; 1059 999 1060 - return btf__parse_elf(path, btf_ext); 1000 + return btf_parse_elf(path, base_btf, btf_ext); 1001 + } 1002 + 1003 + struct btf *btf__parse(const char *path, struct btf_ext **btf_ext) 1004 + { 1005 + return btf_parse(path, NULL, btf_ext); 1006 + } 1007 + 1008 + struct btf *btf__parse_split(const char *path, struct btf *base_btf) 1009 + { 1010 + return btf_parse(path, base_btf, NULL); 1061 1011 } 1062 1012 1063 1013 static int compare_vsi_off(const void *_a, const void *_b) ··· 1261 1171 1262 1172 memcpy(p, btf->types_data, hdr->type_len); 1263 
1173 if (swap_endian) { 1264 - for (i = 1; i <= btf->nr_types; i++) { 1265 - t = p + btf->type_offs[i]; 1174 + for (i = 0; i < btf->nr_types; i++) { 1175 + t = p + btf->type_offs[i]; 1266 1176 /* btf_bswap_type_rest() relies on native t->info, so 1267 1177 * we swap base type info after we swapped all the 1268 1178 * additional information ··· 1305 1215 1306 1216 const char *btf__str_by_offset(const struct btf *btf, __u32 offset) 1307 1217 { 1308 - if (offset < btf->hdr->str_len) 1309 - return btf->strs_data + offset; 1218 + if (offset < btf->start_str_off) 1219 + return btf__str_by_offset(btf->base_btf, offset); 1220 + else if (offset - btf->start_str_off < btf->hdr->str_len) 1221 + return btf->strs_data + (offset - btf->start_str_off); 1310 1222 else 1311 1223 return NULL; 1312 1224 } ··· 1455 1363 1456 1364 static size_t strs_hash_fn(const void *key, void *ctx) 1457 1365 { 1458 - struct btf *btf = ctx; 1459 - const char *str = btf->strs_data + (long)key; 1366 + const struct btf *btf = ctx; 1367 + const char *strs = *btf->strs_data_ptr; 1368 + const char *str = strs + (long)key; 1460 1369 1461 1370 return str_hash(str); 1462 1371 } 1463 1372 1464 1373 static bool strs_hash_equal_fn(const void *key1, const void *key2, void *ctx) 1465 1374 { 1466 - struct btf *btf = ctx; 1467 - const char *str1 = btf->strs_data + (long)key1; 1468 - const char *str2 = btf->strs_data + (long)key2; 1375 + const struct btf *btf = ctx; 1376 + const char *strs = *btf->strs_data_ptr; 1377 + const char *str1 = strs + (long)key1; 1378 + const char *str2 = strs + (long)key2; 1469 1379 1470 1380 return strcmp(str1, str2) == 0; 1471 1381 } ··· 1512 1418 memcpy(types, btf->types_data, btf->hdr->type_len); 1513 1419 memcpy(strs, btf->strs_data, btf->hdr->str_len); 1514 1420 1421 + /* make hashmap below use btf->strs_data as a source of strings */ 1422 + btf->strs_data_ptr = &btf->strs_data; 1423 + 1515 1424 /* build lookup index for all strings */ 1516 1425 hash = hashmap__new(strs_hash_fn, 
strs_hash_equal_fn, btf); 1517 1426 if (IS_ERR(hash)) { ··· 1545 1448 /* if BTF was created from scratch, all strings are guaranteed to be 1546 1449 * unique and deduplicated 1547 1450 */ 1548 - btf->strs_deduped = btf->hdr->str_len <= 1; 1451 + if (btf->hdr->str_len == 0) 1452 + btf->strs_deduped = true; 1453 + if (!btf->base_btf && btf->hdr->str_len == 1) 1454 + btf->strs_deduped = true; 1549 1455 1550 1456 /* invalidate raw_data representation */ 1551 1457 btf_invalidate_raw_data(btf); ··· 1580 1480 long old_off, new_off, len; 1581 1481 void *p; 1582 1482 1483 + if (btf->base_btf) { 1484 + int ret; 1485 + 1486 + ret = btf__find_str(btf->base_btf, s); 1487 + if (ret != -ENOENT) 1488 + return ret; 1489 + } 1490 + 1583 1491 /* BTF needs to be in a modifiable state to build string lookup index */ 1584 1492 if (btf_ensure_modifiable(btf)) 1585 1493 return -ENOMEM; ··· 1602 1494 memcpy(p, s, len); 1603 1495 1604 1496 if (hashmap__find(btf->strs_hash, (void *)new_off, (void **)&old_off)) 1605 - return old_off; 1497 + return btf->start_str_off + old_off; 1606 1498 1607 1499 return -ENOENT; 1608 1500 } ··· 1617 1509 long old_off, new_off, len; 1618 1510 void *p; 1619 1511 int err; 1512 + 1513 + if (btf->base_btf) { 1514 + int ret; 1515 + 1516 + ret = btf__find_str(btf->base_btf, s); 1517 + if (ret != -ENOENT) 1518 + return ret; 1519 + } 1620 1520 1621 1521 if (btf_ensure_modifiable(btf)) 1622 1522 return -ENOMEM; ··· 1652 1536 err = hashmap__insert(btf->strs_hash, (void *)new_off, (void *)new_off, 1653 1537 HASHMAP_ADD, (const void **)&old_off, NULL); 1654 1538 if (err == -EEXIST) 1655 - return old_off; /* duplicated string, return existing offset */ 1539 + return btf->start_str_off + old_off; /* duplicated string, return existing offset */ 1656 1540 if (err) 1657 1541 return err; 1658 1542 1659 1543 btf->hdr->str_len += len; /* new unique string, adjust data length */ 1660 - return new_off; 1544 + return btf->start_str_off + new_off; 1661 1545 } 1662 1546 1663 1547 
static void *btf_add_type_mem(struct btf *btf, size_t add_sz) ··· 1676 1560 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t)); 1677 1561 } 1678 1562 1563 + static int btf_commit_type(struct btf *btf, int data_sz) 1564 + { 1565 + int err; 1566 + 1567 + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 1568 + if (err) 1569 + return err; 1570 + 1571 + btf->hdr->type_len += data_sz; 1572 + btf->hdr->str_off += data_sz; 1573 + btf->nr_types++; 1574 + return btf->start_id + btf->nr_types - 1; 1575 + } 1576 + 1679 1577 /* 1680 1578 * Append new BTF_KIND_INT type with: 1681 1579 * - *name* - non-empty, non-NULL type name; ··· 1702 1572 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding) 1703 1573 { 1704 1574 struct btf_type *t; 1705 - int sz, err, name_off; 1575 + int sz, name_off; 1706 1576 1707 1577 /* non-empty name */ 1708 1578 if (!name || !name[0]) ··· 1736 1606 /* set INT info, we don't allow setting legacy bit offset/size */ 1737 1607 *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8); 1738 1608 1739 - err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 1740 - if (err) 1741 - return err; 1742 - 1743 - btf->hdr->type_len += sz; 1744 - btf->hdr->str_off += sz; 1745 - btf->nr_types++; 1746 - return btf->nr_types; 1609 + return btf_commit_type(btf, sz); 1747 1610 } 1748 1611 1749 1612 /* it's completely legal to append BTF types with type IDs pointing forward to ··· 1754 1631 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id) 1755 1632 { 1756 1633 struct btf_type *t; 1757 - int sz, name_off = 0, err; 1634 + int sz, name_off = 0; 1758 1635 1759 1636 if (validate_type_id(ref_type_id)) 1760 1637 return -EINVAL; ··· 1777 1654 t->info = btf_type_info(kind, 0, 0); 1778 1655 t->type = ref_type_id; 1779 1656 1780 - err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 1781 - if (err) 1782 - return err; 1783 - 1784 - btf->hdr->type_len += sz; 1785 - btf->hdr->str_off += sz; 1786 - 
btf->nr_types++; 1787 - return btf->nr_types; 1657 + return btf_commit_type(btf, sz); 1788 1658 } 1789 1659 1790 1660 /* ··· 1805 1689 { 1806 1690 struct btf_type *t; 1807 1691 struct btf_array *a; 1808 - int sz, err; 1692 + int sz; 1809 1693 1810 1694 if (validate_type_id(index_type_id) || validate_type_id(elem_type_id)) 1811 1695 return -EINVAL; ··· 1827 1711 a->index_type = index_type_id; 1828 1712 a->nelems = nr_elems; 1829 1713 1830 - err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 1831 - if (err) 1832 - return err; 1833 - 1834 - btf->hdr->type_len += sz; 1835 - btf->hdr->str_off += sz; 1836 - btf->nr_types++; 1837 - return btf->nr_types; 1714 + return btf_commit_type(btf, sz); 1838 1715 } 1839 1716 1840 1717 /* generic STRUCT/UNION append function */ 1841 1718 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz) 1842 1719 { 1843 1720 struct btf_type *t; 1844 - int sz, err, name_off = 0; 1721 + int sz, name_off = 0; 1845 1722 1846 1723 if (btf_ensure_modifiable(btf)) 1847 1724 return -ENOMEM; ··· 1857 1748 t->info = btf_type_info(kind, 0, 0); 1858 1749 t->size = bytes_sz; 1859 1750 1860 - err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 1861 - if (err) 1862 - return err; 1863 - 1864 - btf->hdr->type_len += sz; 1865 - btf->hdr->str_off += sz; 1866 - btf->nr_types++; 1867 - return btf->nr_types; 1751 + return btf_commit_type(btf, sz); 1868 1752 } 1869 1753 1870 1754 /* ··· 1895 1793 return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz); 1896 1794 } 1897 1795 1796 + static struct btf_type *btf_last_type(struct btf *btf) 1797 + { 1798 + return btf_type_by_id(btf, btf__get_nr_types(btf)); 1799 + } 1800 + 1898 1801 /* 1899 1802 * Append new field for the current STRUCT/UNION type with: 1900 1803 * - *name* - name of the field, can be NULL or empty for anonymous field; ··· 1921 1814 /* last type should be union/struct */ 1922 1815 if (btf->nr_types == 0) 1923 1816 return -EINVAL; 1924 - t = btf_type_by_id(btf, 
btf->nr_types); 1817 + t = btf_last_type(btf); 1925 1818 if (!btf_is_composite(t)) 1926 1819 return -EINVAL; 1927 1820 ··· 1956 1849 m->offset = bit_offset | (bit_size << 24); 1957 1850 1958 1851 /* btf_add_type_mem can invalidate t pointer */ 1959 - t = btf_type_by_id(btf, btf->nr_types); 1852 + t = btf_last_type(btf); 1960 1853 /* update parent type's vlen and kflag */ 1961 1854 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t)); 1962 1855 ··· 1981 1874 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz) 1982 1875 { 1983 1876 struct btf_type *t; 1984 - int sz, err, name_off = 0; 1877 + int sz, name_off = 0; 1985 1878 1986 1879 /* byte_sz must be power of 2 */ 1987 1880 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8) ··· 2006 1899 t->info = btf_type_info(BTF_KIND_ENUM, 0, 0); 2007 1900 t->size = byte_sz; 2008 1901 2009 - err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 2010 - if (err) 2011 - return err; 2012 - 2013 - btf->hdr->type_len += sz; 2014 - btf->hdr->str_off += sz; 2015 - btf->nr_types++; 2016 - return btf->nr_types; 1902 + return btf_commit_type(btf, sz); 2017 1903 } 2018 1904 2019 1905 /* ··· 2026 1926 /* last type should be BTF_KIND_ENUM */ 2027 1927 if (btf->nr_types == 0) 2028 1928 return -EINVAL; 2029 - t = btf_type_by_id(btf, btf->nr_types); 1929 + t = btf_last_type(btf); 2030 1930 if (!btf_is_enum(t)) 2031 1931 return -EINVAL; 2032 1932 ··· 2053 1953 v->val = value; 2054 1954 2055 1955 /* update parent type's vlen */ 2056 - t = btf_type_by_id(btf, btf->nr_types); 1956 + t = btf_last_type(btf); 2057 1957 btf_type_inc_vlen(t); 2058 1958 2059 1959 btf->hdr->type_len += sz; ··· 2193 2093 int btf__add_func_proto(struct btf *btf, int ret_type_id) 2194 2094 { 2195 2095 struct btf_type *t; 2196 - int sz, err; 2096 + int sz; 2197 2097 2198 2098 if (validate_type_id(ret_type_id)) 2199 2099 return -EINVAL; ··· 2213 2113 t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0); 2214 2114 t->type = 
ret_type_id; 2215 2115 2216 - err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 2217 - if (err) 2218 - return err; 2219 - 2220 - btf->hdr->type_len += sz; 2221 - btf->hdr->str_off += sz; 2222 - btf->nr_types++; 2223 - return btf->nr_types; 2116 + return btf_commit_type(btf, sz); 2224 2117 } 2225 2118 2226 2119 /* ··· 2236 2143 /* last type should be BTF_KIND_FUNC_PROTO */ 2237 2144 if (btf->nr_types == 0) 2238 2145 return -EINVAL; 2239 - t = btf_type_by_id(btf, btf->nr_types); 2146 + t = btf_last_type(btf); 2240 2147 if (!btf_is_func_proto(t)) 2241 2148 return -EINVAL; 2242 2149 ··· 2259 2166 p->type = type_id; 2260 2167 2261 2168 /* update parent type's vlen */ 2262 - t = btf_type_by_id(btf, btf->nr_types); 2169 + t = btf_last_type(btf); 2263 2170 btf_type_inc_vlen(t); 2264 2171 2265 2172 btf->hdr->type_len += sz; ··· 2281 2188 { 2282 2189 struct btf_type *t; 2283 2190 struct btf_var *v; 2284 - int sz, err, name_off; 2191 + int sz, name_off; 2285 2192 2286 2193 /* non-empty name */ 2287 2194 if (!name || !name[0]) ··· 2312 2219 v = btf_var(t); 2313 2220 v->linkage = linkage; 2314 2221 2315 - err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 2316 - if (err) 2317 - return err; 2318 - 2319 - btf->hdr->type_len += sz; 2320 - btf->hdr->str_off += sz; 2321 - btf->nr_types++; 2322 - return btf->nr_types; 2222 + return btf_commit_type(btf, sz); 2323 2223 } 2324 2224 2325 2225 /* ··· 2330 2244 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz) 2331 2245 { 2332 2246 struct btf_type *t; 2333 - int sz, err, name_off; 2247 + int sz, name_off; 2334 2248 2335 2249 /* non-empty name */ 2336 2250 if (!name || !name[0]) ··· 2353 2267 t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0); 2354 2268 t->size = byte_sz; 2355 2269 2356 - err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 2357 - if (err) 2358 - return err; 2359 - 2360 - btf->hdr->type_len += sz; 2361 - btf->hdr->str_off += sz; 2362 - btf->nr_types++; 2363 - return btf->nr_types; 2270 + return 
btf_commit_type(btf, sz); 2364 2271 } 2365 2272 2366 2273 /* ··· 2375 2296 /* last type should be BTF_KIND_DATASEC */ 2376 2297 if (btf->nr_types == 0) 2377 2298 return -EINVAL; 2378 - t = btf_type_by_id(btf, btf->nr_types); 2299 + t = btf_last_type(btf); 2379 2300 if (!btf_is_datasec(t)) 2380 2301 return -EINVAL; 2381 2302 ··· 2396 2317 v->size = byte_sz; 2397 2318 2398 2319 /* update parent type's vlen */ 2399 - t = btf_type_by_id(btf, btf->nr_types); 2320 + t = btf_last_type(btf); 2400 2321 btf_type_inc_vlen(t); 2401 2322 2402 2323 btf->hdr->type_len += sz; ··· 2718 2639 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, 2719 2640 const struct btf_dedup_opts *opts); 2720 2641 static void btf_dedup_free(struct btf_dedup *d); 2642 + static int btf_dedup_prep(struct btf_dedup *d); 2721 2643 static int btf_dedup_strings(struct btf_dedup *d); 2722 2644 static int btf_dedup_prim_types(struct btf_dedup *d); 2723 2645 static int btf_dedup_struct_types(struct btf_dedup *d); ··· 2877 2797 if (btf_ensure_modifiable(btf)) 2878 2798 return -ENOMEM; 2879 2799 2800 + err = btf_dedup_prep(d); 2801 + if (err) { 2802 + pr_debug("btf_dedup_prep failed:%d\n", err); 2803 + goto done; 2804 + } 2880 2805 err = btf_dedup_strings(d); 2881 2806 if (err < 0) { 2882 2807 pr_debug("btf_dedup_strings failed:%d\n", err); ··· 2944 2859 __u32 *hypot_list; 2945 2860 size_t hypot_cnt; 2946 2861 size_t hypot_cap; 2862 + /* Whether hypothetical mapping, if successful, would need to adjust 2863 + * already canonicalized types (due to a new forward declaration to 2864 + * concrete type resolution). In such case, during split BTF dedup 2865 + * candidate type would still be considered as different, because base 2866 + * BTF is considered to be immutable. 
2867 + */ 2868 + bool hypot_adjust_canon; 2947 2869 /* Various option modifying behavior of algorithm */ 2948 2870 struct btf_dedup_opts opts; 2949 - }; 2950 - 2951 - struct btf_str_ptr { 2952 - const char *str; 2953 - __u32 new_off; 2954 - bool used; 2955 - }; 2956 - 2957 - struct btf_str_ptrs { 2958 - struct btf_str_ptr *ptrs; 2959 - const char *data; 2960 - __u32 cnt; 2961 - __u32 cap; 2871 + /* temporary strings deduplication state */ 2872 + void *strs_data; 2873 + size_t strs_cap; 2874 + size_t strs_len; 2875 + struct hashmap* strs_hash; 2962 2876 }; 2963 2877 2964 2878 static long hash_combine(long h, long value) ··· 2998 2914 for (i = 0; i < d->hypot_cnt; i++) 2999 2915 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID; 3000 2916 d->hypot_cnt = 0; 2917 + d->hypot_adjust_canon = false; 3001 2918 } 3002 2919 3003 2920 static void btf_dedup_free(struct btf_dedup *d) ··· 3038 2953 { 3039 2954 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); 3040 2955 hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn; 3041 - int i, err = 0; 2956 + int i, err = 0, type_cnt; 3042 2957 3043 2958 if (!d) 3044 2959 return ERR_PTR(-ENOMEM); ··· 3058 2973 goto done; 3059 2974 } 3060 2975 3061 - d->map = malloc(sizeof(__u32) * (1 + btf->nr_types)); 2976 + type_cnt = btf__get_nr_types(btf) + 1; 2977 + d->map = malloc(sizeof(__u32) * type_cnt); 3062 2978 if (!d->map) { 3063 2979 err = -ENOMEM; 3064 2980 goto done; 3065 2981 } 3066 2982 /* special BTF "void" type is made canonical immediately */ 3067 2983 d->map[0] = 0; 3068 - for (i = 1; i <= btf->nr_types; i++) { 2984 + for (i = 1; i < type_cnt; i++) { 3069 2985 struct btf_type *t = btf_type_by_id(d->btf, i); 3070 2986 3071 2987 /* VAR and DATASEC are never deduped and are self-canonical */ ··· 3076 2990 d->map[i] = BTF_UNPROCESSED_ID; 3077 2991 } 3078 2992 3079 - d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types)); 2993 + d->hypot_map = malloc(sizeof(__u32) * type_cnt); 3080 2994 if (!d->hypot_map) { 3081 2995 err = 
-ENOMEM; 3082 2996 goto done; 3083 2997 } 3084 - for (i = 0; i <= btf->nr_types; i++) 2998 + for (i = 0; i < type_cnt; i++) 3085 2999 d->hypot_map[i] = BTF_UNPROCESSED_ID; 3086 3000 3087 3001 done: ··· 3105 3019 int i, j, r, rec_size; 3106 3020 struct btf_type *t; 3107 3021 3108 - for (i = 1; i <= d->btf->nr_types; i++) { 3109 - t = btf_type_by_id(d->btf, i); 3022 + for (i = 0; i < d->btf->nr_types; i++) { 3023 + t = btf_type_by_id(d->btf, d->btf->start_id + i); 3110 3024 r = fn(&t->name_off, ctx); 3111 3025 if (r) 3112 3026 return r; ··· 3186 3100 return 0; 3187 3101 } 3188 3102 3189 - static int str_sort_by_content(const void *a1, const void *a2) 3103 + static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx) 3190 3104 { 3191 - const struct btf_str_ptr *p1 = a1; 3192 - const struct btf_str_ptr *p2 = a2; 3105 + struct btf_dedup *d = ctx; 3106 + __u32 str_off = *str_off_ptr; 3107 + long old_off, new_off, len; 3108 + const char *s; 3109 + void *p; 3110 + int err; 3193 3111 3194 - return strcmp(p1->str, p2->str); 3195 - } 3196 - 3197 - static int str_sort_by_offset(const void *a1, const void *a2) 3198 - { 3199 - const struct btf_str_ptr *p1 = a1; 3200 - const struct btf_str_ptr *p2 = a2; 3201 - 3202 - if (p1->str != p2->str) 3203 - return p1->str < p2->str ? -1 : 1; 3204 - return 0; 3205 - } 3206 - 3207 - static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem) 3208 - { 3209 - const struct btf_str_ptr *p = pelem; 3210 - 3211 - if (str_ptr != p->str) 3212 - return (const char *)str_ptr < p->str ? 
-1 : 1; 3213 - return 0; 3214 - } 3215 - 3216 - static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx) 3217 - { 3218 - struct btf_str_ptrs *strs; 3219 - struct btf_str_ptr *s; 3220 - 3221 - if (*str_off_ptr == 0) 3112 + /* don't touch empty string or string in main BTF */ 3113 + if (str_off == 0 || str_off < d->btf->start_str_off) 3222 3114 return 0; 3223 3115 3224 - strs = ctx; 3225 - s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, 3226 - sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); 3227 - if (!s) 3228 - return -EINVAL; 3229 - s->used = true; 3230 - return 0; 3231 - } 3116 + s = btf__str_by_offset(d->btf, str_off); 3117 + if (d->btf->base_btf) { 3118 + err = btf__find_str(d->btf->base_btf, s); 3119 + if (err >= 0) { 3120 + *str_off_ptr = err; 3121 + return 0; 3122 + } 3123 + if (err != -ENOENT) 3124 + return err; 3125 + } 3232 3126 3233 - static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx) 3234 - { 3235 - struct btf_str_ptrs *strs; 3236 - struct btf_str_ptr *s; 3127 + len = strlen(s) + 1; 3237 3128 3238 - if (*str_off_ptr == 0) 3239 - return 0; 3129 + new_off = d->strs_len; 3130 + p = btf_add_mem(&d->strs_data, &d->strs_cap, 1, new_off, BTF_MAX_STR_OFFSET, len); 3131 + if (!p) 3132 + return -ENOMEM; 3240 3133 3241 - strs = ctx; 3242 - s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, 3243 - sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); 3244 - if (!s) 3245 - return -EINVAL; 3246 - *str_off_ptr = s->new_off; 3134 + memcpy(p, s, len); 3135 + 3136 + /* Now attempt to add the string, but only if the string with the same 3137 + * contents doesn't exist already (HASHMAP_ADD strategy). If such 3138 + * string exists, we'll get its offset in old_off (that's old_key). 
3139 + */ 3140 + err = hashmap__insert(d->strs_hash, (void *)new_off, (void *)new_off, 3141 + HASHMAP_ADD, (const void **)&old_off, NULL); 3142 + if (err == -EEXIST) { 3143 + *str_off_ptr = d->btf->start_str_off + old_off; 3144 + } else if (err) { 3145 + return err; 3146 + } else { 3147 + *str_off_ptr = d->btf->start_str_off + new_off; 3148 + d->strs_len += len; 3149 + } 3247 3150 return 0; 3248 3151 } 3249 3152 ··· 3249 3174 */ 3250 3175 static int btf_dedup_strings(struct btf_dedup *d) 3251 3176 { 3252 - char *start = d->btf->strs_data; 3253 - char *end = start + d->btf->hdr->str_len; 3254 - char *p = start, *tmp_strs = NULL; 3255 - struct btf_str_ptrs strs = { 3256 - .cnt = 0, 3257 - .cap = 0, 3258 - .ptrs = NULL, 3259 - .data = start, 3260 - }; 3261 - int i, j, err = 0, grp_idx; 3262 - bool grp_used; 3177 + char *s; 3178 + int err; 3263 3179 3264 3180 if (d->btf->strs_deduped) 3265 3181 return 0; 3266 3182 3267 - /* build index of all strings */ 3268 - while (p < end) { 3269 - if (strs.cnt + 1 > strs.cap) { 3270 - struct btf_str_ptr *new_ptrs; 3271 - 3272 - strs.cap += max(strs.cnt / 2, 16U); 3273 - new_ptrs = libbpf_reallocarray(strs.ptrs, strs.cap, sizeof(strs.ptrs[0])); 3274 - if (!new_ptrs) { 3275 - err = -ENOMEM; 3276 - goto done; 3277 - } 3278 - strs.ptrs = new_ptrs; 3279 - } 3280 - 3281 - strs.ptrs[strs.cnt].str = p; 3282 - strs.ptrs[strs.cnt].used = false; 3283 - 3284 - p += strlen(p) + 1; 3285 - strs.cnt++; 3286 - } 3287 - 3288 - /* temporary storage for deduplicated strings */ 3289 - tmp_strs = malloc(d->btf->hdr->str_len); 3290 - if (!tmp_strs) { 3291 - err = -ENOMEM; 3292 - goto done; 3293 - } 3294 - 3295 - /* mark all used strings */ 3296 - strs.ptrs[0].used = true; 3297 - err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs); 3298 - if (err) 3299 - goto done; 3300 - 3301 - /* sort strings by context, so that we can identify duplicates */ 3302 - qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content); 3303 - 3304 - /* 3305 - * 
iterate groups of equal strings and if any instance in a group was 3306 - * referenced, emit single instance and remember new offset 3183 + /* temporarily switch to use btf_dedup's strs_data for strings for hash 3184 + * functions; later we'll just transfer hashmap to struct btf as is, 3185 + * along the strs_data 3307 3186 */ 3308 - p = tmp_strs; 3309 - grp_idx = 0; 3310 - grp_used = strs.ptrs[0].used; 3311 - /* iterate past end to avoid code duplication after loop */ 3312 - for (i = 1; i <= strs.cnt; i++) { 3313 - /* 3314 - * when i == strs.cnt, we want to skip string comparison and go 3315 - * straight to handling last group of strings (otherwise we'd 3316 - * need to handle last group after the loop w/ duplicated code) 3317 - */ 3318 - if (i < strs.cnt && 3319 - !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) { 3320 - grp_used = grp_used || strs.ptrs[i].used; 3321 - continue; 3322 - } 3187 + d->btf->strs_data_ptr = &d->strs_data; 3323 3188 3324 - /* 3325 - * this check would have been required after the loop to handle 3326 - * last group of strings, but due to <= condition in a loop 3327 - * we avoid that duplication 3328 - */ 3329 - if (grp_used) { 3330 - int new_off = p - tmp_strs; 3331 - __u32 len = strlen(strs.ptrs[grp_idx].str); 3332 - 3333 - memmove(p, strs.ptrs[grp_idx].str, len + 1); 3334 - for (j = grp_idx; j < i; j++) 3335 - strs.ptrs[j].new_off = new_off; 3336 - p += len + 1; 3337 - } 3338 - 3339 - if (i < strs.cnt) { 3340 - grp_idx = i; 3341 - grp_used = strs.ptrs[i].used; 3342 - } 3189 + d->strs_hash = hashmap__new(strs_hash_fn, strs_hash_equal_fn, d->btf); 3190 + if (IS_ERR(d->strs_hash)) { 3191 + err = PTR_ERR(d->strs_hash); 3192 + d->strs_hash = NULL; 3193 + goto err_out; 3343 3194 } 3344 3195 3345 - /* replace original strings with deduped ones */ 3346 - d->btf->hdr->str_len = p - tmp_strs; 3347 - memmove(start, tmp_strs, d->btf->hdr->str_len); 3348 - end = start + d->btf->hdr->str_len; 3196 + if (!d->btf->base_btf) { 3197 + s = 
btf_add_mem(&d->strs_data, &d->strs_cap, 1, d->strs_len, BTF_MAX_STR_OFFSET, 1); 3198 + if (!s) 3199 + return -ENOMEM; 3200 + /* initial empty string */ 3201 + s[0] = 0; 3202 + d->strs_len = 1; 3349 3203 3350 - /* restore original order for further binary search lookups */ 3351 - qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset); 3204 + /* insert empty string; we won't be looking it up during strings 3205 + * dedup, but it's good to have it for generic BTF string lookups 3206 + */ 3207 + err = hashmap__insert(d->strs_hash, (void *)0, (void *)0, 3208 + HASHMAP_ADD, NULL, NULL); 3209 + if (err) 3210 + goto err_out; 3211 + } 3352 3212 3353 3213 /* remap string offsets */ 3354 - err = btf_for_each_str_off(d, btf_str_remap_offset, &strs); 3214 + err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d); 3355 3215 if (err) 3356 - goto done; 3216 + goto err_out; 3357 3217 3358 - d->btf->hdr->str_len = end - start; 3218 + /* replace BTF string data and hash with deduped ones */ 3219 + free(d->btf->strs_data); 3220 + hashmap__free(d->btf->strs_hash); 3221 + d->btf->strs_data = d->strs_data; 3222 + d->btf->strs_data_cap = d->strs_cap; 3223 + d->btf->hdr->str_len = d->strs_len; 3224 + d->btf->strs_hash = d->strs_hash; 3225 + /* now point strs_data_ptr back to btf->strs_data */ 3226 + d->btf->strs_data_ptr = &d->btf->strs_data; 3227 + 3228 + d->strs_data = d->strs_hash = NULL; 3229 + d->strs_len = d->strs_cap = 0; 3359 3230 d->btf->strs_deduped = true; 3231 + return 0; 3360 3232 3361 - done: 3362 - free(tmp_strs); 3363 - free(strs.ptrs); 3233 + err_out: 3234 + free(d->strs_data); 3235 + hashmap__free(d->strs_hash); 3236 + d->strs_data = d->strs_hash = NULL; 3237 + d->strs_len = d->strs_cap = 0; 3238 + 3239 + /* restore strings pointer for existing d->btf->strs_hash back */ 3240 + d->btf->strs_data_ptr = &d->strs_data; 3241 + 3364 3242 return err; 3365 3243 } 3366 3244 ··· 3578 3550 return true; 3579 3551 } 3580 3552 3553 + /* Prepare split BTF for 
deduplication by calculating hashes of base BTF's 3554 + * types and initializing the rest of the state (canonical type mapping) for 3555 + * the fixed base BTF part. 3556 + */ 3557 + static int btf_dedup_prep(struct btf_dedup *d) 3558 + { 3559 + struct btf_type *t; 3560 + int type_id; 3561 + long h; 3562 + 3563 + if (!d->btf->base_btf) 3564 + return 0; 3565 + 3566 + for (type_id = 1; type_id < d->btf->start_id; type_id++) { 3567 + t = btf_type_by_id(d->btf, type_id); 3568 + 3569 + /* all base BTF types are self-canonical by definition */ 3570 + d->map[type_id] = type_id; 3571 + 3572 + switch (btf_kind(t)) { 3573 + case BTF_KIND_VAR: 3574 + case BTF_KIND_DATASEC: 3575 + /* VAR and DATASEC are never hash/deduplicated */ 3576 + continue; 3577 + case BTF_KIND_CONST: 3578 + case BTF_KIND_VOLATILE: 3579 + case BTF_KIND_RESTRICT: 3580 + case BTF_KIND_PTR: 3581 + case BTF_KIND_FWD: 3582 + case BTF_KIND_TYPEDEF: 3583 + case BTF_KIND_FUNC: 3584 + h = btf_hash_common(t); 3585 + break; 3586 + case BTF_KIND_INT: 3587 + h = btf_hash_int(t); 3588 + break; 3589 + case BTF_KIND_ENUM: 3590 + h = btf_hash_enum(t); 3591 + break; 3592 + case BTF_KIND_STRUCT: 3593 + case BTF_KIND_UNION: 3594 + h = btf_hash_struct(t); 3595 + break; 3596 + case BTF_KIND_ARRAY: 3597 + h = btf_hash_array(t); 3598 + break; 3599 + case BTF_KIND_FUNC_PROTO: 3600 + h = btf_hash_fnproto(t); 3601 + break; 3602 + default: 3603 + pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id); 3604 + return -EINVAL; 3605 + } 3606 + if (btf_dedup_table_add(d, h, type_id)) 3607 + return -ENOMEM; 3608 + } 3609 + 3610 + return 0; 3611 + } 3612 + 3581 3613 /* 3582 3614 * Deduplicate primitive types, that can't reference other types, by calculating 3583 3615 * their type signature hash and comparing them with any possible canonical ··· 3731 3643 { 3732 3644 int i, err; 3733 3645 3734 - for (i = 1; i <= d->btf->nr_types; i++) { 3735 - err = btf_dedup_prim_type(d, i); 3646 + for (i = 0; i < d->btf->nr_types; i++) { 3647 
+ err = btf_dedup_prim_type(d, d->btf->start_id + i); 3736 3648 if (err) 3737 3649 return err; 3738 3650 } ··· 3783 3695 static inline __u16 btf_fwd_kind(struct btf_type *t) 3784 3696 { 3785 3697 return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT; 3698 + } 3699 + 3700 + /* Check if given two types are identical ARRAY definitions */ 3701 + static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) 3702 + { 3703 + struct btf_type *t1, *t2; 3704 + 3705 + t1 = btf_type_by_id(d->btf, id1); 3706 + t2 = btf_type_by_id(d->btf, id2); 3707 + if (!btf_is_array(t1) || !btf_is_array(t2)) 3708 + return 0; 3709 + 3710 + return btf_equal_array(t1, t2); 3786 3711 } 3787 3712 3788 3713 /* ··· 3908 3807 canon_id = resolve_fwd_id(d, canon_id); 3909 3808 3910 3809 hypot_type_id = d->hypot_map[canon_id]; 3911 - if (hypot_type_id <= BTF_MAX_NR_TYPES) 3912 - return hypot_type_id == cand_id; 3810 + if (hypot_type_id <= BTF_MAX_NR_TYPES) { 3811 + /* In some cases compiler will generate different DWARF types 3812 + * for *identical* array type definitions and use them for 3813 + * different fields within the *same* struct. This breaks type 3814 + * equivalence check, which makes an assumption that candidate 3815 + * types sub-graph has a consistent and deduped-by-compiler 3816 + * types within a single CU. So work around that by explicitly 3817 + * allowing identical array types here. 
3818 + */ 3819 + return hypot_type_id == cand_id || 3820 + btf_dedup_identical_arrays(d, hypot_type_id, cand_id); 3821 + } 3913 3822 3914 3823 if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) 3915 3824 return -ENOMEM; ··· 3945 3834 } else { 3946 3835 real_kind = cand_kind; 3947 3836 fwd_kind = btf_fwd_kind(canon_type); 3837 + /* we'd need to resolve base FWD to STRUCT/UNION */ 3838 + if (fwd_kind == real_kind && canon_id < d->btf->start_id) 3839 + d->hypot_adjust_canon = true; 3948 3840 } 3949 3841 return fwd_kind == real_kind; 3950 3842 } ··· 3985 3871 return 0; 3986 3872 cand_arr = btf_array(cand_type); 3987 3873 canon_arr = btf_array(canon_type); 3988 - eq = btf_dedup_is_equiv(d, 3989 - cand_arr->index_type, canon_arr->index_type); 3874 + eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type); 3990 3875 if (eq <= 0) 3991 3876 return eq; 3992 3877 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type); ··· 4068 3955 */ 4069 3956 static void btf_dedup_merge_hypot_map(struct btf_dedup *d) 4070 3957 { 4071 - __u32 cand_type_id, targ_type_id; 3958 + __u32 canon_type_id, targ_type_id; 4072 3959 __u16 t_kind, c_kind; 4073 3960 __u32 t_id, c_id; 4074 3961 int i; 4075 3962 4076 3963 for (i = 0; i < d->hypot_cnt; i++) { 4077 - cand_type_id = d->hypot_list[i]; 4078 - targ_type_id = d->hypot_map[cand_type_id]; 3964 + canon_type_id = d->hypot_list[i]; 3965 + targ_type_id = d->hypot_map[canon_type_id]; 4079 3966 t_id = resolve_type_id(d, targ_type_id); 4080 - c_id = resolve_type_id(d, cand_type_id); 3967 + c_id = resolve_type_id(d, canon_type_id); 4081 3968 t_kind = btf_kind(btf__type_by_id(d->btf, t_id)); 4082 3969 c_kind = btf_kind(btf__type_by_id(d->btf, c_id)); 4083 3970 /* ··· 4092 3979 * stability is not a requirement for STRUCT/UNION equivalence 4093 3980 * checks, though. 
4094 3981 */ 3982 + 3983 + /* if it's the split BTF case, we still need to point base FWD 3984 + * to STRUCT/UNION in a split BTF, because FWDs from split BTF 3985 + * will be resolved against base FWD. If we don't point base 3986 + * canonical FWD to the resolved STRUCT/UNION, then all the 3987 + * FWDs in split BTF won't be correctly resolved to a proper 3988 + * STRUCT/UNION. 3989 + */ 4095 3990 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD) 4096 3991 d->map[c_id] = t_id; 4097 - else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD) 3992 + 3993 + /* if graph equivalence determined that we'd need to adjust 3994 + * base canonical types, then we need to only point base FWDs 3995 + * to STRUCTs/UNIONs and do no more modifications. For all 3996 + * other purposes the type graphs were not equivalent. 3997 + */ 3998 + if (d->hypot_adjust_canon) 3999 + continue; 4000 + 4001 + if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD) 4098 4002 d->map[t_id] = c_id; 4099 4003 4100 4004 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) && ··· 4195 4065 return eq; 4196 4066 if (!eq) 4197 4067 continue; 4198 - new_id = cand_id; 4199 4068 btf_dedup_merge_hypot_map(d); 4069 + if (d->hypot_adjust_canon) /* not really equivalent */ 4070 + continue; 4071 + new_id = cand_id; 4200 4072 break; 4201 4073 } 4202 4074 ··· 4213 4081 { 4214 4082 int i, err; 4215 4083 4216 - for (i = 1; i <= d->btf->nr_types; i++) { 4217 - err = btf_dedup_struct_type(d, i); 4084 + for (i = 0; i < d->btf->nr_types; i++) { 4085 + err = btf_dedup_struct_type(d, d->btf->start_id + i); 4218 4086 if (err) 4219 4087 return err; 4220 4088 } ··· 4357 4225 { 4358 4226 int i, err; 4359 4227 4360 - for (i = 1; i <= d->btf->nr_types; i++) { 4361 - err = btf_dedup_ref_type(d, i); 4228 + for (i = 0; i < d->btf->nr_types; i++) { 4229 + err = btf_dedup_ref_type(d, d->btf->start_id + i); 4362 4230 if (err < 0) 4363 4231 return err; 4364 4232 } ··· 4382 4250 static int btf_dedup_compact_types(struct 
btf_dedup *d) 4383 4251 { 4384 4252 __u32 *new_offs; 4385 - __u32 next_type_id = 1; 4253 + __u32 next_type_id = d->btf->start_id; 4254 + const struct btf_type *t; 4386 4255 void *p; 4387 - int i, len; 4256 + int i, id, len; 4388 4257 4389 4258 /* we are going to reuse hypot_map to store compaction remapping */ 4390 4259 d->hypot_map[0] = 0; 4391 - for (i = 1; i <= d->btf->nr_types; i++) 4392 - d->hypot_map[i] = BTF_UNPROCESSED_ID; 4260 + /* base BTF types are not renumbered */ 4261 + for (id = 1; id < d->btf->start_id; id++) 4262 + d->hypot_map[id] = id; 4263 + for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) 4264 + d->hypot_map[id] = BTF_UNPROCESSED_ID; 4393 4265 4394 4266 p = d->btf->types_data; 4395 4267 4396 - for (i = 1; i <= d->btf->nr_types; i++) { 4397 - if (d->map[i] != i) 4268 + for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) { 4269 + if (d->map[id] != id) 4398 4270 continue; 4399 4271 4400 - len = btf_type_size(btf__type_by_id(d->btf, i)); 4272 + t = btf__type_by_id(d->btf, id); 4273 + len = btf_type_size(t); 4401 4274 if (len < 0) 4402 4275 return len; 4403 4276 4404 - memmove(p, btf__type_by_id(d->btf, i), len); 4405 - d->hypot_map[i] = next_type_id; 4406 - d->btf->type_offs[next_type_id] = p - d->btf->types_data; 4277 + memmove(p, t, len); 4278 + d->hypot_map[id] = next_type_id; 4279 + d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data; 4407 4280 p += len; 4408 4281 next_type_id++; 4409 4282 } 4410 4283 4411 4284 /* shrink struct btf's internal types index and update btf_header */ 4412 - d->btf->nr_types = next_type_id - 1; 4413 - d->btf->type_offs_cap = d->btf->nr_types + 1; 4285 + d->btf->nr_types = next_type_id - d->btf->start_id; 4286 + d->btf->type_offs_cap = d->btf->nr_types; 4414 4287 d->btf->hdr->type_len = p - d->btf->types_data; 4415 4288 new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap, 4416 4289 sizeof(*new_offs)); 4417 - if (!new_offs) 4290 + if 
(d->btf->type_offs_cap && !new_offs) 4418 4291 return -ENOMEM; 4419 4292 d->btf->type_offs = new_offs; 4420 4293 d->btf->hdr->str_off = d->btf->hdr->type_len; ··· 4551 4414 { 4552 4415 int i, r; 4553 4416 4554 - for (i = 1; i <= d->btf->nr_types; i++) { 4555 - r = btf_dedup_remap_type(d, i); 4417 + for (i = 0; i < d->btf->nr_types; i++) { 4418 + r = btf_dedup_remap_type(d, d->btf->start_id + i); 4556 4419 if (r < 0) 4557 4420 return r; 4558 4421 }
+8
tools/lib/bpf/btf.h
··· 31 31 }; 32 32 33 33 LIBBPF_API void btf__free(struct btf *btf); 34 + 34 35 LIBBPF_API struct btf *btf__new(const void *data, __u32 size); 36 + LIBBPF_API struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf); 35 37 LIBBPF_API struct btf *btf__new_empty(void); 38 + LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf); 39 + 36 40 LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); 41 + LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf); 37 42 LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); 43 + LIBBPF_API struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf); 38 44 LIBBPF_API struct btf *btf__parse_raw(const char *path); 45 + LIBBPF_API struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf); 46 + 39 47 LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); 40 48 LIBBPF_API int btf__load(struct btf *btf); 41 49 LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
+9
tools/lib/bpf/libbpf.map
··· 337 337 perf_buffer__consume_buffer; 338 338 xsk_socket__create_shared; 339 339 } LIBBPF_0.1.0; 340 + 341 + LIBBPF_0.3.0 { 342 + global: 343 + btf__parse_elf_split; 344 + btf__parse_raw_split; 345 + btf__parse_split; 346 + btf__new_empty_split; 347 + btf__new_split; 348 + } LIBBPF_0.2.0;
+1 -1
tools/testing/selftests/bpf/Makefile
··· 386 386 TRUNNER_BPF_PROGS_DIR := progs 387 387 TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ 388 388 network_helpers.c testing_helpers.c \ 389 - flow_dissector_load.h 389 + btf_helpers.c flow_dissector_load.h 390 390 TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \ 391 391 $(wildcard progs/btf_dump_test_case_*.c) 392 392 TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
+259
tools/testing/selftests/bpf/btf_helpers.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2020 Facebook */ 3 + #include <stdio.h> 4 + #include <errno.h> 5 + #include <bpf/btf.h> 6 + #include <bpf/libbpf.h> 7 + #include "test_progs.h" 8 + 9 + static const char * const btf_kind_str_mapping[] = { 10 + [BTF_KIND_UNKN] = "UNKNOWN", 11 + [BTF_KIND_INT] = "INT", 12 + [BTF_KIND_PTR] = "PTR", 13 + [BTF_KIND_ARRAY] = "ARRAY", 14 + [BTF_KIND_STRUCT] = "STRUCT", 15 + [BTF_KIND_UNION] = "UNION", 16 + [BTF_KIND_ENUM] = "ENUM", 17 + [BTF_KIND_FWD] = "FWD", 18 + [BTF_KIND_TYPEDEF] = "TYPEDEF", 19 + [BTF_KIND_VOLATILE] = "VOLATILE", 20 + [BTF_KIND_CONST] = "CONST", 21 + [BTF_KIND_RESTRICT] = "RESTRICT", 22 + [BTF_KIND_FUNC] = "FUNC", 23 + [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO", 24 + [BTF_KIND_VAR] = "VAR", 25 + [BTF_KIND_DATASEC] = "DATASEC", 26 + }; 27 + 28 + static const char *btf_kind_str(__u16 kind) 29 + { 30 + if (kind > BTF_KIND_DATASEC) 31 + return "UNKNOWN"; 32 + return btf_kind_str_mapping[kind]; 33 + } 34 + 35 + static const char *btf_int_enc_str(__u8 encoding) 36 + { 37 + switch (encoding) { 38 + case 0: 39 + return "(none)"; 40 + case BTF_INT_SIGNED: 41 + return "SIGNED"; 42 + case BTF_INT_CHAR: 43 + return "CHAR"; 44 + case BTF_INT_BOOL: 45 + return "BOOL"; 46 + default: 47 + return "UNKN"; 48 + } 49 + } 50 + 51 + static const char *btf_var_linkage_str(__u32 linkage) 52 + { 53 + switch (linkage) { 54 + case BTF_VAR_STATIC: 55 + return "static"; 56 + case BTF_VAR_GLOBAL_ALLOCATED: 57 + return "global-alloc"; 58 + default: 59 + return "(unknown)"; 60 + } 61 + } 62 + 63 + static const char *btf_func_linkage_str(const struct btf_type *t) 64 + { 65 + switch (btf_vlen(t)) { 66 + case BTF_FUNC_STATIC: 67 + return "static"; 68 + case BTF_FUNC_GLOBAL: 69 + return "global"; 70 + case BTF_FUNC_EXTERN: 71 + return "extern"; 72 + default: 73 + return "(unknown)"; 74 + } 75 + } 76 + 77 + static const char *btf_str(const struct btf *btf, __u32 off) 78 + { 79 + if (!off) 80 + return "(anon)"; 81 + return 
btf__str_by_offset(btf, off) ?: "(invalid)"; 82 + } 83 + 84 + int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id) 85 + { 86 + const struct btf_type *t; 87 + int kind, i; 88 + __u32 vlen; 89 + 90 + t = btf__type_by_id(btf, id); 91 + if (!t) 92 + return -EINVAL; 93 + 94 + vlen = btf_vlen(t); 95 + kind = btf_kind(t); 96 + 97 + fprintf(out, "[%u] %s '%s'", id, btf_kind_str(kind), btf_str(btf, t->name_off)); 98 + 99 + switch (kind) { 100 + case BTF_KIND_INT: 101 + fprintf(out, " size=%u bits_offset=%u nr_bits=%u encoding=%s", 102 + t->size, btf_int_offset(t), btf_int_bits(t), 103 + btf_int_enc_str(btf_int_encoding(t))); 104 + break; 105 + case BTF_KIND_PTR: 106 + case BTF_KIND_CONST: 107 + case BTF_KIND_VOLATILE: 108 + case BTF_KIND_RESTRICT: 109 + case BTF_KIND_TYPEDEF: 110 + fprintf(out, " type_id=%u", t->type); 111 + break; 112 + case BTF_KIND_ARRAY: { 113 + const struct btf_array *arr = btf_array(t); 114 + 115 + fprintf(out, " type_id=%u index_type_id=%u nr_elems=%u", 116 + arr->type, arr->index_type, arr->nelems); 117 + break; 118 + } 119 + case BTF_KIND_STRUCT: 120 + case BTF_KIND_UNION: { 121 + const struct btf_member *m = btf_members(t); 122 + 123 + fprintf(out, " size=%u vlen=%u", t->size, vlen); 124 + for (i = 0; i < vlen; i++, m++) { 125 + __u32 bit_off, bit_sz; 126 + 127 + bit_off = btf_member_bit_offset(t, i); 128 + bit_sz = btf_member_bitfield_size(t, i); 129 + fprintf(out, "\n\t'%s' type_id=%u bits_offset=%u", 130 + btf_str(btf, m->name_off), m->type, bit_off); 131 + if (bit_sz) 132 + fprintf(out, " bitfield_size=%u", bit_sz); 133 + } 134 + break; 135 + } 136 + case BTF_KIND_ENUM: { 137 + const struct btf_enum *v = btf_enum(t); 138 + 139 + fprintf(out, " size=%u vlen=%u", t->size, vlen); 140 + for (i = 0; i < vlen; i++, v++) { 141 + fprintf(out, "\n\t'%s' val=%u", 142 + btf_str(btf, v->name_off), v->val); 143 + } 144 + break; 145 + } 146 + case BTF_KIND_FWD: 147 + fprintf(out, " fwd_kind=%s", btf_kflag(t) ? 
"union" : "struct"); 148 + break; 149 + case BTF_KIND_FUNC: 150 + fprintf(out, " type_id=%u linkage=%s", t->type, btf_func_linkage_str(t)); 151 + break; 152 + case BTF_KIND_FUNC_PROTO: { 153 + const struct btf_param *p = btf_params(t); 154 + 155 + fprintf(out, " ret_type_id=%u vlen=%u", t->type, vlen); 156 + for (i = 0; i < vlen; i++, p++) { 157 + fprintf(out, "\n\t'%s' type_id=%u", 158 + btf_str(btf, p->name_off), p->type); 159 + } 160 + break; 161 + } 162 + case BTF_KIND_VAR: 163 + fprintf(out, " type_id=%u, linkage=%s", 164 + t->type, btf_var_linkage_str(btf_var(t)->linkage)); 165 + break; 166 + case BTF_KIND_DATASEC: { 167 + const struct btf_var_secinfo *v = btf_var_secinfos(t); 168 + 169 + fprintf(out, " size=%u vlen=%u", t->size, vlen); 170 + for (i = 0; i < vlen; i++, v++) { 171 + fprintf(out, "\n\ttype_id=%u offset=%u size=%u", 172 + v->type, v->offset, v->size); 173 + } 174 + break; 175 + } 176 + default: 177 + break; 178 + } 179 + 180 + return 0; 181 + } 182 + 183 + /* Print raw BTF type dump into a local buffer and return string pointer back. 
184 + * Buffer *will* be overwritten by subsequent btf_type_raw_dump() calls 185 + */ 186 + const char *btf_type_raw_dump(const struct btf *btf, int type_id) 187 + { 188 + static char buf[16 * 1024]; 189 + FILE *buf_file; 190 + 191 + buf_file = fmemopen(buf, sizeof(buf) - 1, "w"); 192 + if (!buf_file) { 193 + fprintf(stderr, "Failed to open memstream: %d\n", errno); 194 + return NULL; 195 + } 196 + 197 + fprintf_btf_type_raw(buf_file, btf, type_id); 198 + fflush(buf_file); 199 + fclose(buf_file); 200 + 201 + return buf; 202 + } 203 + 204 + int btf_validate_raw(struct btf *btf, int nr_types, const char *exp_types[]) 205 + { 206 + int i; 207 + bool ok = true; 208 + 209 + ASSERT_EQ(btf__get_nr_types(btf), nr_types, "btf_nr_types"); 210 + 211 + for (i = 1; i <= nr_types; i++) { 212 + if (!ASSERT_STREQ(btf_type_raw_dump(btf, i), exp_types[i - 1], "raw_dump")) 213 + ok = false; 214 + } 215 + 216 + return ok; 217 + } 218 + 219 + static void btf_dump_printf(void *ctx, const char *fmt, va_list args) 220 + { 221 + vfprintf(ctx, fmt, args); 222 + } 223 + 224 + /* Print BTF-to-C dump into a local buffer and return string pointer back. 
225 + * Buffer *will* be overwritten by subsequent btf_type_raw_dump() calls 226 + */ 227 + const char *btf_type_c_dump(const struct btf *btf) 228 + { 229 + static char buf[16 * 1024]; 230 + FILE *buf_file; 231 + struct btf_dump *d = NULL; 232 + struct btf_dump_opts opts = {}; 233 + int err, i; 234 + 235 + buf_file = fmemopen(buf, sizeof(buf) - 1, "w"); 236 + if (!buf_file) { 237 + fprintf(stderr, "Failed to open memstream: %d\n", errno); 238 + return NULL; 239 + } 240 + 241 + opts.ctx = buf_file; 242 + d = btf_dump__new(btf, NULL, &opts, btf_dump_printf); 243 + if (libbpf_get_error(d)) { 244 + fprintf(stderr, "Failed to create btf_dump instance: %ld\n", libbpf_get_error(d)); 245 + return NULL; 246 + } 247 + 248 + for (i = 1; i <= btf__get_nr_types(btf); i++) { 249 + err = btf_dump__dump_type(d, i); 250 + if (err) { 251 + fprintf(stderr, "Failed to dump type [%d]: %d\n", i, err); 252 + return NULL; 253 + } 254 + } 255 + 256 + fflush(buf_file); 257 + fclose(buf_file); 258 + return buf; 259 + }
+19
tools/testing/selftests/bpf/btf_helpers.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2020 Facebook */ 3 + #ifndef __BTF_HELPERS_H 4 + #define __BTF_HELPERS_H 5 + 6 + #include <stdio.h> 7 + #include <bpf/btf.h> 8 + 9 + int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id); 10 + const char *btf_type_raw_dump(const struct btf *btf, int type_id); 11 + int btf_validate_raw(struct btf *btf, int nr_types, const char *exp_types[]); 12 + 13 + #define VALIDATE_RAW_BTF(btf, raw_types...) \ 14 + btf_validate_raw(btf, \ 15 + sizeof((const char *[]){raw_types})/sizeof(void *),\ 16 + (const char *[]){raw_types}) 17 + 18 + const char *btf_type_c_dump(const struct btf *btf); 19 + #endif
+25 -15
tools/testing/selftests/bpf/prog_tests/btf.c
··· 6652 6652 const void *test_btf_data, *expect_btf_data; 6653 6653 const char *ret_test_next_str, *ret_expect_next_str; 6654 6654 const char *test_strs, *expect_strs; 6655 - const char *test_str_cur, *test_str_end; 6655 + const char *test_str_cur; 6656 6656 const char *expect_str_cur, *expect_str_end; 6657 6657 unsigned int raw_btf_size; 6658 6658 void *raw_btf; ··· 6719 6719 goto done; 6720 6720 } 6721 6721 6722 - test_str_cur = test_strs; 6723 - test_str_end = test_strs + test_hdr->str_len; 6724 6722 expect_str_cur = expect_strs; 6725 6723 expect_str_end = expect_strs + expect_hdr->str_len; 6726 - while (test_str_cur < test_str_end && expect_str_cur < expect_str_end) { 6724 + while (expect_str_cur < expect_str_end) { 6727 6725 size_t test_len, expect_len; 6726 + int off; 6727 + 6728 + off = btf__find_str(test_btf, expect_str_cur); 6729 + if (CHECK(off < 0, "exp str '%s' not found: %d\n", expect_str_cur, off)) { 6730 + err = -1; 6731 + goto done; 6732 + } 6733 + test_str_cur = btf__str_by_offset(test_btf, off); 6728 6734 6729 6735 test_len = strlen(test_str_cur); 6730 6736 expect_len = strlen(expect_str_cur); ··· 6747 6741 err = -1; 6748 6742 goto done; 6749 6743 } 6750 - test_str_cur += test_len + 1; 6751 6744 expect_str_cur += expect_len + 1; 6752 - } 6753 - if (CHECK(test_str_cur != test_str_end, 6754 - "test_str_cur:%p != test_str_end:%p", 6755 - test_str_cur, test_str_end)) { 6756 - err = -1; 6757 - goto done; 6758 6745 } 6759 6746 6760 6747 test_nr_types = btf__get_nr_types(test_btf); ··· 6774 6775 err = -1; 6775 6776 goto done; 6776 6777 } 6777 - if (CHECK(memcmp((void *)test_type, 6778 - (void *)expect_type, 6779 - test_size), 6780 - "type #%d: contents differ", i)) { 6778 + if (CHECK(btf_kind(test_type) != btf_kind(expect_type), 6779 + "type %d kind: exp %d != got %u\n", 6780 + i, btf_kind(expect_type), btf_kind(test_type))) { 6781 + err = -1; 6782 + goto done; 6783 + } 6784 + if (CHECK(test_type->info != expect_type->info, 6785 + "type %d info: exp %d 
!= got %u\n", 6786 + i, expect_type->info, test_type->info)) { 6787 + err = -1; 6788 + goto done; 6789 + } 6790 + if (CHECK(test_type->size != expect_type->size, 6791 + "type %d size/type: exp %d != got %u\n", 6792 + i, expect_type->size, test_type->size)) { 6781 6793 err = -1; 6782 6794 goto done; 6783 6795 }
+325
tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2020 Facebook */ 3 + #include <test_progs.h> 4 + #include <bpf/btf.h> 5 + #include "btf_helpers.h" 6 + 7 + static void test_split_simple() { 8 + const struct btf_type *t; 9 + struct btf *btf1, *btf2; 10 + int str_off, err; 11 + 12 + btf1 = btf__new_empty(); 13 + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) 14 + return; 15 + 16 + btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */ 17 + 18 + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ 19 + btf__add_ptr(btf1, 1); /* [2] ptr to int */ 20 + btf__add_struct(btf1, "s1", 4); /* [3] struct s1 { */ 21 + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ 22 + /* } */ 23 + 24 + VALIDATE_RAW_BTF( 25 + btf1, 26 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 27 + "[2] PTR '(anon)' type_id=1", 28 + "[3] STRUCT 's1' size=4 vlen=1\n" 29 + "\t'f1' type_id=1 bits_offset=0"); 30 + 31 + ASSERT_STREQ(btf_type_c_dump(btf1), "\ 32 + struct s1 {\n\ 33 + int f1;\n\ 34 + };\n\n", "c_dump"); 35 + 36 + btf2 = btf__new_empty_split(btf1); 37 + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) 38 + goto cleanup; 39 + 40 + /* pointer size should be "inherited" from main BTF */ 41 + ASSERT_EQ(btf__pointer_size(btf2), 8, "inherit_ptr_sz"); 42 + 43 + str_off = btf__find_str(btf2, "int"); 44 + ASSERT_NEQ(str_off, -ENOENT, "str_int_missing"); 45 + 46 + t = btf__type_by_id(btf2, 1); 47 + if (!ASSERT_OK_PTR(t, "int_type")) 48 + goto cleanup; 49 + ASSERT_EQ(btf_is_int(t), true, "int_kind"); 50 + ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "int", "int_name"); 51 + 52 + btf__add_struct(btf2, "s2", 16); /* [4] struct s2 { */ 53 + btf__add_field(btf2, "f1", 6, 0, 0); /* struct s1 f1; */ 54 + btf__add_field(btf2, "f2", 5, 32, 0); /* int f2; */ 55 + btf__add_field(btf2, "f3", 2, 64, 0); /* int *f3; */ 56 + /* } */ 57 + 58 + /* duplicated int */ 59 + btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [5] int */ 60 + 61 + /* duplicated struct s1 */ 62 + 
btf__add_struct(btf2, "s1", 4); /* [6] struct s1 { */ 63 + btf__add_field(btf2, "f1", 5, 0, 0); /* int f1; */ 64 + /* } */ 65 + 66 + VALIDATE_RAW_BTF( 67 + btf2, 68 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 69 + "[2] PTR '(anon)' type_id=1", 70 + "[3] STRUCT 's1' size=4 vlen=1\n" 71 + "\t'f1' type_id=1 bits_offset=0", 72 + "[4] STRUCT 's2' size=16 vlen=3\n" 73 + "\t'f1' type_id=6 bits_offset=0\n" 74 + "\t'f2' type_id=5 bits_offset=32\n" 75 + "\t'f3' type_id=2 bits_offset=64", 76 + "[5] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 77 + "[6] STRUCT 's1' size=4 vlen=1\n" 78 + "\t'f1' type_id=5 bits_offset=0"); 79 + 80 + ASSERT_STREQ(btf_type_c_dump(btf2), "\ 81 + struct s1 {\n\ 82 + int f1;\n\ 83 + };\n\ 84 + \n\ 85 + struct s1___2 {\n\ 86 + int f1;\n\ 87 + };\n\ 88 + \n\ 89 + struct s2 {\n\ 90 + struct s1___2 f1;\n\ 91 + int f2;\n\ 92 + int *f3;\n\ 93 + };\n\n", "c_dump"); 94 + 95 + err = btf__dedup(btf2, NULL, NULL); 96 + if (!ASSERT_OK(err, "btf_dedup")) 97 + goto cleanup; 98 + 99 + VALIDATE_RAW_BTF( 100 + btf2, 101 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 102 + "[2] PTR '(anon)' type_id=1", 103 + "[3] STRUCT 's1' size=4 vlen=1\n" 104 + "\t'f1' type_id=1 bits_offset=0", 105 + "[4] STRUCT 's2' size=16 vlen=3\n" 106 + "\t'f1' type_id=3 bits_offset=0\n" 107 + "\t'f2' type_id=1 bits_offset=32\n" 108 + "\t'f3' type_id=2 bits_offset=64"); 109 + 110 + ASSERT_STREQ(btf_type_c_dump(btf2), "\ 111 + struct s1 {\n\ 112 + int f1;\n\ 113 + };\n\ 114 + \n\ 115 + struct s2 {\n\ 116 + struct s1 f1;\n\ 117 + int f2;\n\ 118 + int *f3;\n\ 119 + };\n\n", "c_dump"); 120 + 121 + cleanup: 122 + btf__free(btf2); 123 + btf__free(btf1); 124 + } 125 + 126 + static void test_split_fwd_resolve() { 127 + struct btf *btf1, *btf2; 128 + int err; 129 + 130 + btf1 = btf__new_empty(); 131 + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) 132 + return; 133 + 134 + btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */ 135 + 136 + 
btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ 137 + btf__add_ptr(btf1, 4); /* [2] ptr to struct s1 */ 138 + btf__add_ptr(btf1, 5); /* [3] ptr to struct s2 */ 139 + btf__add_struct(btf1, "s1", 16); /* [4] struct s1 { */ 140 + btf__add_field(btf1, "f1", 2, 0, 0); /* struct s1 *f1; */ 141 + btf__add_field(btf1, "f2", 3, 64, 0); /* struct s2 *f2; */ 142 + /* } */ 143 + btf__add_struct(btf1, "s2", 4); /* [5] struct s2 { */ 144 + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ 145 + /* } */ 146 + 147 + VALIDATE_RAW_BTF( 148 + btf1, 149 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 150 + "[2] PTR '(anon)' type_id=4", 151 + "[3] PTR '(anon)' type_id=5", 152 + "[4] STRUCT 's1' size=16 vlen=2\n" 153 + "\t'f1' type_id=2 bits_offset=0\n" 154 + "\t'f2' type_id=3 bits_offset=64", 155 + "[5] STRUCT 's2' size=4 vlen=1\n" 156 + "\t'f1' type_id=1 bits_offset=0"); 157 + 158 + btf2 = btf__new_empty_split(btf1); 159 + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) 160 + goto cleanup; 161 + 162 + btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [6] int */ 163 + btf__add_ptr(btf2, 10); /* [7] ptr to struct s1 */ 164 + btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [8] fwd for struct s2 */ 165 + btf__add_ptr(btf2, 8); /* [9] ptr to fwd struct s2 */ 166 + btf__add_struct(btf2, "s1", 16); /* [10] struct s1 { */ 167 + btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */ 168 + btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */ 169 + /* } */ 170 + 171 + VALIDATE_RAW_BTF( 172 + btf2, 173 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 174 + "[2] PTR '(anon)' type_id=4", 175 + "[3] PTR '(anon)' type_id=5", 176 + "[4] STRUCT 's1' size=16 vlen=2\n" 177 + "\t'f1' type_id=2 bits_offset=0\n" 178 + "\t'f2' type_id=3 bits_offset=64", 179 + "[5] STRUCT 's2' size=4 vlen=1\n" 180 + "\t'f1' type_id=1 bits_offset=0", 181 + "[6] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 182 + "[7] PTR '(anon)' type_id=10", 183 + "[8] FWD 's2' 
fwd_kind=struct", 184 + "[9] PTR '(anon)' type_id=8", 185 + "[10] STRUCT 's1' size=16 vlen=2\n" 186 + "\t'f1' type_id=7 bits_offset=0\n" 187 + "\t'f2' type_id=9 bits_offset=64"); 188 + 189 + err = btf__dedup(btf2, NULL, NULL); 190 + if (!ASSERT_OK(err, "btf_dedup")) 191 + goto cleanup; 192 + 193 + VALIDATE_RAW_BTF( 194 + btf2, 195 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 196 + "[2] PTR '(anon)' type_id=4", 197 + "[3] PTR '(anon)' type_id=5", 198 + "[4] STRUCT 's1' size=16 vlen=2\n" 199 + "\t'f1' type_id=2 bits_offset=0\n" 200 + "\t'f2' type_id=3 bits_offset=64", 201 + "[5] STRUCT 's2' size=4 vlen=1\n" 202 + "\t'f1' type_id=1 bits_offset=0"); 203 + 204 + cleanup: 205 + btf__free(btf2); 206 + btf__free(btf1); 207 + } 208 + 209 + static void test_split_struct_duped() { 210 + struct btf *btf1, *btf2; 211 + int err; 212 + 213 + btf1 = btf__new_empty(); 214 + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) 215 + return; 216 + 217 + btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */ 218 + 219 + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ 220 + btf__add_ptr(btf1, 5); /* [2] ptr to struct s1 */ 221 + btf__add_fwd(btf1, "s2", BTF_FWD_STRUCT); /* [3] fwd for struct s2 */ 222 + btf__add_ptr(btf1, 3); /* [4] ptr to fwd struct s2 */ 223 + btf__add_struct(btf1, "s1", 16); /* [5] struct s1 { */ 224 + btf__add_field(btf1, "f1", 2, 0, 0); /* struct s1 *f1; */ 225 + btf__add_field(btf1, "f2", 4, 64, 0); /* struct s2 *f2; */ 226 + /* } */ 227 + 228 + VALIDATE_RAW_BTF( 229 + btf1, 230 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 231 + "[2] PTR '(anon)' type_id=5", 232 + "[3] FWD 's2' fwd_kind=struct", 233 + "[4] PTR '(anon)' type_id=3", 234 + "[5] STRUCT 's1' size=16 vlen=2\n" 235 + "\t'f1' type_id=2 bits_offset=0\n" 236 + "\t'f2' type_id=4 bits_offset=64"); 237 + 238 + btf2 = btf__new_empty_split(btf1); 239 + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) 240 + goto cleanup; 241 + 242 + btf__add_int(btf2, "int", 4, 
BTF_INT_SIGNED); /* [6] int */ 243 + btf__add_ptr(btf2, 10); /* [7] ptr to struct s1 */ 244 + btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [8] fwd for struct s2 */ 245 + btf__add_ptr(btf2, 11); /* [9] ptr to struct s2 */ 246 + btf__add_struct(btf2, "s1", 16); /* [10] struct s1 { */ 247 + btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */ 248 + btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */ 249 + /* } */ 250 + btf__add_struct(btf2, "s2", 40); /* [11] struct s2 { */ 251 + btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */ 252 + btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */ 253 + btf__add_field(btf2, "f3", 6, 128, 0); /* int f3; */ 254 + btf__add_field(btf2, "f4", 10, 192, 0); /* struct s1 f4; */ 255 + /* } */ 256 + btf__add_ptr(btf2, 8); /* [12] ptr to fwd struct s2 */ 257 + btf__add_struct(btf2, "s3", 8); /* [13] struct s3 { */ 258 + btf__add_field(btf2, "f1", 12, 0, 0); /* struct s2 *f1; (fwd) */ 259 + /* } */ 260 + 261 + VALIDATE_RAW_BTF( 262 + btf2, 263 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 264 + "[2] PTR '(anon)' type_id=5", 265 + "[3] FWD 's2' fwd_kind=struct", 266 + "[4] PTR '(anon)' type_id=3", 267 + "[5] STRUCT 's1' size=16 vlen=2\n" 268 + "\t'f1' type_id=2 bits_offset=0\n" 269 + "\t'f2' type_id=4 bits_offset=64", 270 + "[6] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 271 + "[7] PTR '(anon)' type_id=10", 272 + "[8] FWD 's2' fwd_kind=struct", 273 + "[9] PTR '(anon)' type_id=11", 274 + "[10] STRUCT 's1' size=16 vlen=2\n" 275 + "\t'f1' type_id=7 bits_offset=0\n" 276 + "\t'f2' type_id=9 bits_offset=64", 277 + "[11] STRUCT 's2' size=40 vlen=4\n" 278 + "\t'f1' type_id=7 bits_offset=0\n" 279 + "\t'f2' type_id=9 bits_offset=64\n" 280 + "\t'f3' type_id=6 bits_offset=128\n" 281 + "\t'f4' type_id=10 bits_offset=192", 282 + "[12] PTR '(anon)' type_id=8", 283 + "[13] STRUCT 's3' size=8 vlen=1\n" 284 + "\t'f1' type_id=12 bits_offset=0"); 285 + 286 + err = btf__dedup(btf2, NULL, NULL); 287 
+ if (!ASSERT_OK(err, "btf_dedup")) 288 + goto cleanup; 289 + 290 + VALIDATE_RAW_BTF( 291 + btf2, 292 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 293 + "[2] PTR '(anon)' type_id=5", 294 + "[3] FWD 's2' fwd_kind=struct", 295 + "[4] PTR '(anon)' type_id=3", 296 + "[5] STRUCT 's1' size=16 vlen=2\n" 297 + "\t'f1' type_id=2 bits_offset=0\n" 298 + "\t'f2' type_id=4 bits_offset=64", 299 + "[6] PTR '(anon)' type_id=8", 300 + "[7] PTR '(anon)' type_id=9", 301 + "[8] STRUCT 's1' size=16 vlen=2\n" 302 + "\t'f1' type_id=6 bits_offset=0\n" 303 + "\t'f2' type_id=7 bits_offset=64", 304 + "[9] STRUCT 's2' size=40 vlen=4\n" 305 + "\t'f1' type_id=6 bits_offset=0\n" 306 + "\t'f2' type_id=7 bits_offset=64\n" 307 + "\t'f3' type_id=1 bits_offset=128\n" 308 + "\t'f4' type_id=8 bits_offset=192", 309 + "[10] STRUCT 's3' size=8 vlen=1\n" 310 + "\t'f1' type_id=7 bits_offset=0"); 311 + 312 + cleanup: 313 + btf__free(btf2); 314 + btf__free(btf1); 315 + } 316 + 317 + void test_btf_dedup_split() 318 + { 319 + if (test__start_subtest("split_simple")) 320 + test_split_simple(); 321 + if (test__start_subtest("split_struct_duped")) 322 + test_split_struct_duped(); 323 + if (test__start_subtest("split_fwd_resolve")) 324 + test_split_fwd_resolve(); 325 + }
+99
tools/testing/selftests/bpf/prog_tests/btf_split.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2020 Facebook */ 3 + #include <test_progs.h> 4 + #include <bpf/btf.h> 5 + 6 + static char *dump_buf; 7 + static size_t dump_buf_sz; 8 + static FILE *dump_buf_file; 9 + 10 + static void btf_dump_printf(void *ctx, const char *fmt, va_list args) 11 + { 12 + vfprintf(ctx, fmt, args); 13 + } 14 + 15 + void test_btf_split() { 16 + struct btf_dump_opts opts; 17 + struct btf_dump *d = NULL; 18 + const struct btf_type *t; 19 + struct btf *btf1, *btf2; 20 + int str_off, i, err; 21 + 22 + btf1 = btf__new_empty(); 23 + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) 24 + return; 25 + 26 + btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */ 27 + 28 + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ 29 + btf__add_ptr(btf1, 1); /* [2] ptr to int */ 30 + 31 + btf__add_struct(btf1, "s1", 4); /* [3] struct s1 { */ 32 + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ 33 + /* } */ 34 + 35 + btf2 = btf__new_empty_split(btf1); 36 + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) 37 + goto cleanup; 38 + 39 + /* pointer size should be "inherited" from main BTF */ 40 + ASSERT_EQ(btf__pointer_size(btf2), 8, "inherit_ptr_sz"); 41 + 42 + str_off = btf__find_str(btf2, "int"); 43 + ASSERT_NEQ(str_off, -ENOENT, "str_int_missing"); 44 + 45 + t = btf__type_by_id(btf2, 1); 46 + if (!ASSERT_OK_PTR(t, "int_type")) 47 + goto cleanup; 48 + ASSERT_EQ(btf_is_int(t), true, "int_kind"); 49 + ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "int", "int_name"); 50 + 51 + btf__add_struct(btf2, "s2", 16); /* [4] struct s2 { */ 52 + btf__add_field(btf2, "f1", 3, 0, 0); /* struct s1 f1; */ 53 + btf__add_field(btf2, "f2", 1, 32, 0); /* int f2; */ 54 + btf__add_field(btf2, "f3", 2, 64, 0); /* int *f3; */ 55 + /* } */ 56 + 57 + t = btf__type_by_id(btf1, 4); 58 + ASSERT_NULL(t, "split_type_in_main"); 59 + 60 + t = btf__type_by_id(btf2, 4); 61 + if (!ASSERT_OK_PTR(t, "split_struct_type")) 62 + goto cleanup; 63 + 
ASSERT_EQ(btf_is_struct(t), true, "split_struct_kind"); 64 + ASSERT_EQ(btf_vlen(t), 3, "split_struct_vlen"); 65 + ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "s2", "split_struct_name"); 66 + 67 + /* BTF-to-C dump of split BTF */ 68 + dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz); 69 + if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream")) 70 + return; 71 + opts.ctx = dump_buf_file; 72 + d = btf_dump__new(btf2, NULL, &opts, btf_dump_printf); 73 + if (!ASSERT_OK_PTR(d, "btf_dump__new")) 74 + goto cleanup; 75 + for (i = 1; i <= btf__get_nr_types(btf2); i++) { 76 + err = btf_dump__dump_type(d, i); 77 + ASSERT_OK(err, "dump_type_ok"); 78 + } 79 + fflush(dump_buf_file); 80 + dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ 81 + ASSERT_STREQ(dump_buf, 82 + "struct s1 {\n" 83 + " int f1;\n" 84 + "};\n" 85 + "\n" 86 + "struct s2 {\n" 87 + " struct s1 f1;\n" 88 + " int f2;\n" 89 + " int *f3;\n" 90 + "};\n\n", "c_dump"); 91 + 92 + cleanup: 93 + if (dump_buf_file) 94 + fclose(dump_buf_file); 95 + free(dump_buf); 96 + btf_dump__free(d); 97 + btf__free(btf1); 98 + btf__free(btf2); 99 + }
+43
tools/testing/selftests/bpf/prog_tests/btf_write.c
··· 2 2 /* Copyright (c) 2020 Facebook */ 3 3 #include <test_progs.h> 4 4 #include <bpf/btf.h> 5 + #include "btf_helpers.h" 5 6 6 7 static int duration = 0; 7 8 ··· 40 39 ASSERT_EQ(t->size, 4, "int_sz"); 41 40 ASSERT_EQ(btf_int_encoding(t), BTF_INT_SIGNED, "int_enc"); 42 41 ASSERT_EQ(btf_int_bits(t), 32, "int_bits"); 42 + ASSERT_STREQ(btf_type_raw_dump(btf, 1), 43 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", "raw_dump"); 43 44 44 45 /* invalid int size */ 45 46 id = btf__add_int(btf, "bad sz int", 7, 0); ··· 62 59 t = btf__type_by_id(btf, 2); 63 60 ASSERT_EQ(btf_kind(t), BTF_KIND_PTR, "ptr_kind"); 64 61 ASSERT_EQ(t->type, 1, "ptr_type"); 62 + ASSERT_STREQ(btf_type_raw_dump(btf, 2), 63 + "[2] PTR '(anon)' type_id=1", "raw_dump"); 65 64 66 65 id = btf__add_const(btf, 5); /* points forward to restrict */ 67 66 ASSERT_EQ(id, 3, "const_id"); 68 67 t = btf__type_by_id(btf, 3); 69 68 ASSERT_EQ(btf_kind(t), BTF_KIND_CONST, "const_kind"); 70 69 ASSERT_EQ(t->type, 5, "const_type"); 70 + ASSERT_STREQ(btf_type_raw_dump(btf, 3), 71 + "[3] CONST '(anon)' type_id=5", "raw_dump"); 71 72 72 73 id = btf__add_volatile(btf, 3); 73 74 ASSERT_EQ(id, 4, "volatile_id"); 74 75 t = btf__type_by_id(btf, 4); 75 76 ASSERT_EQ(btf_kind(t), BTF_KIND_VOLATILE, "volatile_kind"); 76 77 ASSERT_EQ(t->type, 3, "volatile_type"); 78 + ASSERT_STREQ(btf_type_raw_dump(btf, 4), 79 + "[4] VOLATILE '(anon)' type_id=3", "raw_dump"); 77 80 78 81 id = btf__add_restrict(btf, 4); 79 82 ASSERT_EQ(id, 5, "restrict_id"); 80 83 t = btf__type_by_id(btf, 5); 81 84 ASSERT_EQ(btf_kind(t), BTF_KIND_RESTRICT, "restrict_kind"); 82 85 ASSERT_EQ(t->type, 4, "restrict_type"); 86 + ASSERT_STREQ(btf_type_raw_dump(btf, 5), 87 + "[5] RESTRICT '(anon)' type_id=4", "raw_dump"); 83 88 84 89 /* ARRAY */ 85 90 id = btf__add_array(btf, 1, 2, 10); /* int *[10] */ ··· 97 86 ASSERT_EQ(btf_array(t)->index_type, 1, "array_index_type"); 98 87 ASSERT_EQ(btf_array(t)->type, 2, "array_elem_type"); 99 88 
ASSERT_EQ(btf_array(t)->nelems, 10, "array_nelems"); 89 + ASSERT_STREQ(btf_type_raw_dump(btf, 6), 90 + "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10", "raw_dump"); 100 91 101 92 /* STRUCT */ 102 93 err = btf__add_field(btf, "field", 1, 0, 0); ··· 126 113 ASSERT_EQ(m->type, 1, "f2_type"); 127 114 ASSERT_EQ(btf_member_bit_offset(t, 1), 32, "f2_bit_off"); 128 115 ASSERT_EQ(btf_member_bitfield_size(t, 1), 16, "f2_bit_sz"); 116 + ASSERT_STREQ(btf_type_raw_dump(btf, 7), 117 + "[7] STRUCT 's1' size=8 vlen=2\n" 118 + "\t'f1' type_id=1 bits_offset=0\n" 119 + "\t'f2' type_id=1 bits_offset=32 bitfield_size=16", "raw_dump"); 129 120 130 121 /* UNION */ 131 122 id = btf__add_union(btf, "u1", 8); ··· 153 136 ASSERT_EQ(m->type, 1, "f1_type"); 154 137 ASSERT_EQ(btf_member_bit_offset(t, 0), 0, "f1_bit_off"); 155 138 ASSERT_EQ(btf_member_bitfield_size(t, 0), 16, "f1_bit_sz"); 139 + ASSERT_STREQ(btf_type_raw_dump(btf, 8), 140 + "[8] UNION 'u1' size=8 vlen=1\n" 141 + "\t'f1' type_id=1 bits_offset=0 bitfield_size=16", "raw_dump"); 156 142 157 143 /* ENUM */ 158 144 id = btf__add_enum(btf, "e1", 4); ··· 176 156 v = btf_enum(t) + 1; 177 157 ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v2", "v2_name"); 178 158 ASSERT_EQ(v->val, 2, "v2_val"); 159 + ASSERT_STREQ(btf_type_raw_dump(btf, 9), 160 + "[9] ENUM 'e1' size=4 vlen=2\n" 161 + "\t'v1' val=1\n" 162 + "\t'v2' val=2", "raw_dump"); 179 163 180 164 /* FWDs */ 181 165 id = btf__add_fwd(btf, "struct_fwd", BTF_FWD_STRUCT); ··· 188 164 ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "struct_fwd", "fwd_name"); 189 165 ASSERT_EQ(btf_kind(t), BTF_KIND_FWD, "fwd_kind"); 190 166 ASSERT_EQ(btf_kflag(t), 0, "fwd_kflag"); 167 + ASSERT_STREQ(btf_type_raw_dump(btf, 10), 168 + "[10] FWD 'struct_fwd' fwd_kind=struct", "raw_dump"); 191 169 192 170 id = btf__add_fwd(btf, "union_fwd", BTF_FWD_UNION); 193 171 ASSERT_EQ(id, 11, "union_fwd_id"); ··· 197 171 ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "union_fwd", "fwd_name"); 198 172 
ASSERT_EQ(btf_kind(t), BTF_KIND_FWD, "fwd_kind"); 199 173 ASSERT_EQ(btf_kflag(t), 1, "fwd_kflag"); 174 + ASSERT_STREQ(btf_type_raw_dump(btf, 11), 175 + "[11] FWD 'union_fwd' fwd_kind=union", "raw_dump"); 200 176 201 177 id = btf__add_fwd(btf, "enum_fwd", BTF_FWD_ENUM); 202 178 ASSERT_EQ(id, 12, "enum_fwd_id"); ··· 207 179 ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM, "enum_fwd_kind"); 208 180 ASSERT_EQ(btf_vlen(t), 0, "enum_fwd_kind"); 209 181 ASSERT_EQ(t->size, 4, "enum_fwd_sz"); 182 + ASSERT_STREQ(btf_type_raw_dump(btf, 12), 183 + "[12] ENUM 'enum_fwd' size=4 vlen=0", "raw_dump"); 210 184 211 185 /* TYPEDEF */ 212 186 id = btf__add_typedef(btf, "typedef1", 1); ··· 217 187 ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "typedef1", "typedef_name"); 218 188 ASSERT_EQ(btf_kind(t), BTF_KIND_TYPEDEF, "typedef_kind"); 219 189 ASSERT_EQ(t->type, 1, "typedef_type"); 190 + ASSERT_STREQ(btf_type_raw_dump(btf, 13), 191 + "[13] TYPEDEF 'typedef1' type_id=1", "raw_dump"); 220 192 221 193 /* FUNC & FUNC_PROTO */ 222 194 id = btf__add_func(btf, "func1", BTF_FUNC_GLOBAL, 15); ··· 228 196 ASSERT_EQ(t->type, 15, "func_type"); 229 197 ASSERT_EQ(btf_kind(t), BTF_KIND_FUNC, "func_kind"); 230 198 ASSERT_EQ(btf_vlen(t), BTF_FUNC_GLOBAL, "func_vlen"); 199 + ASSERT_STREQ(btf_type_raw_dump(btf, 14), 200 + "[14] FUNC 'func1' type_id=15 linkage=global", "raw_dump"); 231 201 232 202 id = btf__add_func_proto(btf, 1); 233 203 ASSERT_EQ(id, 15, "func_proto_id"); ··· 248 214 p = btf_params(t) + 1; 249 215 ASSERT_STREQ(btf__str_by_offset(btf, p->name_off), "p2", "p2_name"); 250 216 ASSERT_EQ(p->type, 2, "p2_type"); 217 + ASSERT_STREQ(btf_type_raw_dump(btf, 15), 218 + "[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n" 219 + "\t'p1' type_id=1\n" 220 + "\t'p2' type_id=2", "raw_dump"); 251 221 252 222 /* VAR */ 253 223 id = btf__add_var(btf, "var1", BTF_VAR_GLOBAL_ALLOCATED, 1); ··· 261 223 ASSERT_EQ(btf_kind(t), BTF_KIND_VAR, "var_kind"); 262 224 ASSERT_EQ(t->type, 1, "var_type"); 263 225 
ASSERT_EQ(btf_var(t)->linkage, BTF_VAR_GLOBAL_ALLOCATED, "var_type"); 226 + ASSERT_STREQ(btf_type_raw_dump(btf, 16), 227 + "[16] VAR 'var1' type_id=1, linkage=global-alloc", "raw_dump"); 264 228 265 229 /* DATASECT */ 266 230 id = btf__add_datasec(btf, "datasec1", 12); ··· 279 239 ASSERT_EQ(vi->type, 1, "v1_type"); 280 240 ASSERT_EQ(vi->offset, 4, "v1_off"); 281 241 ASSERT_EQ(vi->size, 8, "v1_sz"); 242 + ASSERT_STREQ(btf_type_raw_dump(btf, 17), 243 + "[17] DATASEC 'datasec1' size=12 vlen=1\n" 244 + "\ttype_id=1 offset=4 size=8", "raw_dump"); 282 245 283 246 btf__free(btf); 284 247 }
+11
tools/testing/selftests/bpf/test_progs.h
··· 141 141 ___ok; \ 142 142 }) 143 143 144 + #define ASSERT_NEQ(actual, expected, name) ({ \ 145 + static int duration = 0; \ 146 + typeof(actual) ___act = (actual); \ 147 + typeof(expected) ___exp = (expected); \ 148 + bool ___ok = ___act != ___exp; \ 149 + CHECK(!___ok, (name), \ 150 + "unexpected %s: actual %lld == expected %lld\n", \ 151 + (name), (long long)(___act), (long long)(___exp)); \ 152 + ___ok; \ 153 + }) 154 + 144 155 #define ASSERT_STREQ(actual, expected, name) ({ \ 145 156 static int duration = 0; \ 146 157 const char *___act = actual; \