Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libbpf: Streamline error reporting for high-level APIs

Implement changes to error reporting for high-level libbpf APIs to make them
less surprising and less error-prone to users:
- in all cases when an error happens, errno is set to an appropriate error
value;
- in libbpf 1.0 mode, all pointer-returning APIs return NULL on error and
error code is communicated through errno; this applies both to APIs that
already returned NULL before (so now they communicate more detailed error
codes), as well as for many APIs that used ERR_PTR() macro and encoded
error numbers as fake pointers.
- in legacy (default) mode, those APIs that were returning ERR_PTR(err),
continue doing so, but still set errno.

With these changes, errno can always be used to extract the actual error,
regardless of legacy or libbpf 1.0 modes. This is utilized internally in
libbpf in places where libbpf uses its own high-level APIs.
libbpf_get_error() is adapted to handle both cases completely transparently to
end-users (and is used by libbpf consistently as well).

More context, justification, and discussion can be found in "Libbpf: the road
to v1.0" document ([0]).

[0] https://docs.google.com/document/d/1UyjTZuPFWiPFyKk1tV5an11_iaRuec6U-ZESZ54nNTY

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20210525035935.1461796-5-andrii@kernel.org

authored by

Andrii Nakryiko and committed by
Alexei Starovoitov
e9fc3ce9 f12b6543

+531 -468
+9 -9
tools/lib/bpf/bpf_prog_linfo.c
··· 106 106 nr_linfo = info->nr_line_info; 107 107 108 108 if (!nr_linfo) 109 - return NULL; 109 + return errno = EINVAL, NULL; 110 110 111 111 /* 112 112 * The min size that bpf_prog_linfo has to access for ··· 114 114 */ 115 115 if (info->line_info_rec_size < 116 116 offsetof(struct bpf_line_info, file_name_off)) 117 - return NULL; 117 + return errno = EINVAL, NULL; 118 118 119 119 prog_linfo = calloc(1, sizeof(*prog_linfo)); 120 120 if (!prog_linfo) 121 - return NULL; 121 + return errno = ENOMEM, NULL; 122 122 123 123 /* Copy xlated line_info */ 124 124 prog_linfo->nr_linfo = nr_linfo; ··· 174 174 175 175 err_free: 176 176 bpf_prog_linfo__free(prog_linfo); 177 - return NULL; 177 + return errno = EINVAL, NULL; 178 178 } 179 179 180 180 const struct bpf_line_info * ··· 186 186 const __u64 *jited_linfo; 187 187 188 188 if (func_idx >= prog_linfo->nr_jited_func) 189 - return NULL; 189 + return errno = ENOENT, NULL; 190 190 191 191 nr_linfo = prog_linfo->nr_jited_linfo_per_func[func_idx]; 192 192 if (nr_skip >= nr_linfo) 193 - return NULL; 193 + return errno = ENOENT, NULL; 194 194 195 195 start = prog_linfo->jited_linfo_func_idx[func_idx] + nr_skip; 196 196 jited_rec_size = prog_linfo->jited_rec_size; ··· 198 198 (start * jited_rec_size); 199 199 jited_linfo = raw_jited_linfo; 200 200 if (addr < *jited_linfo) 201 - return NULL; 201 + return errno = ENOENT, NULL; 202 202 203 203 nr_linfo -= nr_skip; 204 204 rec_size = prog_linfo->rec_size; ··· 225 225 226 226 nr_linfo = prog_linfo->nr_linfo; 227 227 if (nr_skip >= nr_linfo) 228 - return NULL; 228 + return errno = ENOENT, NULL; 229 229 230 230 rec_size = prog_linfo->rec_size; 231 231 raw_linfo = prog_linfo->raw_linfo + (nr_skip * rec_size); 232 232 linfo = raw_linfo; 233 233 if (insn_off < linfo->insn_off) 234 - return NULL; 234 + return errno = ENOENT, NULL; 235 235 236 236 nr_linfo -= nr_skip; 237 237 for (i = 0; i < nr_linfo; i++) {
+152 -150
tools/lib/bpf/btf.c
··· 443 443 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) 444 444 { 445 445 if (type_id >= btf->start_id + btf->nr_types) 446 - return NULL; 446 + return errno = EINVAL, NULL; 447 447 return btf_type_by_id((struct btf *)btf, type_id); 448 448 } 449 449 ··· 510 510 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz) 511 511 { 512 512 if (ptr_sz != 4 && ptr_sz != 8) 513 - return -EINVAL; 513 + return libbpf_err(-EINVAL); 514 514 btf->ptr_sz = ptr_sz; 515 515 return 0; 516 516 } ··· 537 537 int btf__set_endianness(struct btf *btf, enum btf_endianness endian) 538 538 { 539 539 if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN) 540 - return -EINVAL; 540 + return libbpf_err(-EINVAL); 541 541 542 542 btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN); 543 543 if (!btf->swapped_endian) { ··· 568 568 int i; 569 569 570 570 t = btf__type_by_id(btf, type_id); 571 - for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); 572 - i++) { 571 + for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) { 573 572 switch (btf_kind(t)) { 574 573 case BTF_KIND_INT: 575 574 case BTF_KIND_STRUCT: ··· 591 592 case BTF_KIND_ARRAY: 592 593 array = btf_array(t); 593 594 if (nelems && array->nelems > UINT32_MAX / nelems) 594 - return -E2BIG; 595 + return libbpf_err(-E2BIG); 595 596 nelems *= array->nelems; 596 597 type_id = array->type; 597 598 break; 598 599 default: 599 - return -EINVAL; 600 + return libbpf_err(-EINVAL); 600 601 } 601 602 602 603 t = btf__type_by_id(btf, type_id); ··· 604 605 605 606 done: 606 607 if (size < 0) 607 - return -EINVAL; 608 + return libbpf_err(-EINVAL); 608 609 if (nelems && size > UINT32_MAX / nelems) 609 - return -E2BIG; 610 + return libbpf_err(-E2BIG); 610 611 611 612 return nelems * size; 612 613 } ··· 639 640 for (i = 0; i < vlen; i++, m++) { 640 641 align = btf__align_of(btf, m->type); 641 642 if (align <= 0) 642 - return align; 643 + return libbpf_err(align); 643 
644 max_align = max(max_align, align); 644 645 } 645 646 ··· 647 648 } 648 649 default: 649 650 pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t)); 650 - return 0; 651 + return errno = EINVAL, 0; 651 652 } 652 653 } 653 654 ··· 666 667 } 667 668 668 669 if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t)) 669 - return -EINVAL; 670 + return libbpf_err(-EINVAL); 670 671 671 672 return type_id; 672 673 } ··· 686 687 return i; 687 688 } 688 689 689 - return -ENOENT; 690 + return libbpf_err(-ENOENT); 690 691 } 691 692 692 693 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, ··· 708 709 return i; 709 710 } 710 711 711 - return -ENOENT; 712 + return libbpf_err(-ENOENT); 712 713 } 713 714 714 715 static bool btf_is_modifiable(const struct btf *btf) ··· 784 785 785 786 struct btf *btf__new_empty(void) 786 787 { 787 - return btf_new_empty(NULL); 788 + return libbpf_ptr(btf_new_empty(NULL)); 788 789 } 789 790 790 791 struct btf *btf__new_empty_split(struct btf *base_btf) 791 792 { 792 - return btf_new_empty(base_btf); 793 + return libbpf_ptr(btf_new_empty(base_btf)); 793 794 } 794 795 795 796 static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf) ··· 845 846 846 847 struct btf *btf__new(const void *data, __u32 size) 847 848 { 848 - return btf_new(data, size, NULL); 849 + return libbpf_ptr(btf_new(data, size, NULL)); 849 850 } 850 851 851 852 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, ··· 936 937 goto done; 937 938 } 938 939 btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf); 939 - if (IS_ERR(btf)) 940 + err = libbpf_get_error(btf); 941 + if (err) 940 942 goto done; 941 943 942 944 switch (gelf_getclass(elf)) { ··· 953 953 } 954 954 955 955 if (btf_ext && btf_ext_data) { 956 - *btf_ext = btf_ext__new(btf_ext_data->d_buf, 957 - btf_ext_data->d_size); 958 - if (IS_ERR(*btf_ext)) 956 + *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); 957 + err = 
libbpf_get_error(*btf_ext); 958 + if (err) 959 959 goto done; 960 960 } else if (btf_ext) { 961 961 *btf_ext = NULL; ··· 965 965 elf_end(elf); 966 966 close(fd); 967 967 968 - if (err) 969 - return ERR_PTR(err); 970 - /* 971 - * btf is always parsed before btf_ext, so no need to clean up 972 - * btf_ext, if btf loading failed 973 - */ 974 - if (IS_ERR(btf)) 968 + if (!err) 975 969 return btf; 976 - if (btf_ext && IS_ERR(*btf_ext)) { 977 - btf__free(btf); 978 - err = PTR_ERR(*btf_ext); 979 - return ERR_PTR(err); 980 - } 981 - return btf; 970 + 971 + if (btf_ext) 972 + btf_ext__free(*btf_ext); 973 + btf__free(btf); 974 + 975 + return ERR_PTR(err); 982 976 } 983 977 984 978 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) 985 979 { 986 - return btf_parse_elf(path, NULL, btf_ext); 980 + return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext)); 987 981 } 988 982 989 983 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf) 990 984 { 991 - return btf_parse_elf(path, base_btf, NULL); 985 + return libbpf_ptr(btf_parse_elf(path, base_btf, NULL)); 992 986 } 993 987 994 988 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf) ··· 1050 1056 1051 1057 struct btf *btf__parse_raw(const char *path) 1052 1058 { 1053 - return btf_parse_raw(path, NULL); 1059 + return libbpf_ptr(btf_parse_raw(path, NULL)); 1054 1060 } 1055 1061 1056 1062 struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf) 1057 1063 { 1058 - return btf_parse_raw(path, base_btf); 1064 + return libbpf_ptr(btf_parse_raw(path, base_btf)); 1059 1065 } 1060 1066 1061 1067 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext) 1062 1068 { 1063 1069 struct btf *btf; 1070 + int err; 1064 1071 1065 1072 if (btf_ext) 1066 1073 *btf_ext = NULL; 1067 1074 1068 1075 btf = btf_parse_raw(path, base_btf); 1069 - if (!IS_ERR(btf) || PTR_ERR(btf) != -EPROTO) 1076 + err = libbpf_get_error(btf); 1077 + if (!err) 1070 1078 
return btf; 1071 - 1079 + if (err != -EPROTO) 1080 + return ERR_PTR(err); 1072 1081 return btf_parse_elf(path, base_btf, btf_ext); 1073 1082 } 1074 1083 1075 1084 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext) 1076 1085 { 1077 - return btf_parse(path, NULL, btf_ext); 1086 + return libbpf_ptr(btf_parse(path, NULL, btf_ext)); 1078 1087 } 1079 1088 1080 1089 struct btf *btf__parse_split(const char *path, struct btf *base_btf) 1081 1090 { 1082 - return btf_parse(path, base_btf, NULL); 1091 + return libbpf_ptr(btf_parse(path, base_btf, NULL)); 1083 1092 } 1084 1093 1085 1094 static int compare_vsi_off(const void *_a, const void *_b) ··· 1175 1178 } 1176 1179 } 1177 1180 1178 - return err; 1181 + return libbpf_err(err); 1179 1182 } 1180 1183 1181 1184 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian); ··· 1188 1191 int err = 0; 1189 1192 1190 1193 if (btf->fd >= 0) 1191 - return -EEXIST; 1194 + return libbpf_err(-EEXIST); 1192 1195 1193 1196 retry_load: 1194 1197 if (log_buf_size) { 1195 1198 log_buf = malloc(log_buf_size); 1196 1199 if (!log_buf) 1197 - return -ENOMEM; 1200 + return libbpf_err(-ENOMEM); 1198 1201 1199 1202 *log_buf = 0; 1200 1203 } ··· 1226 1229 1227 1230 done: 1228 1231 free(log_buf); 1229 - return err; 1232 + return libbpf_err(err); 1230 1233 } 1231 1234 1232 1235 int btf__fd(const struct btf *btf) ··· 1302 1305 1303 1306 data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian); 1304 1307 if (!data) 1305 - return NULL; 1308 + return errno = -ENOMEM, NULL; 1306 1309 1307 1310 btf->raw_size = data_sz; 1308 1311 if (btf->swapped_endian) ··· 1320 1323 else if (offset - btf->start_str_off < btf->hdr->str_len) 1321 1324 return btf_strs_data(btf) + (offset - btf->start_str_off); 1322 1325 else 1323 - return NULL; 1326 + return errno = EINVAL, NULL; 1324 1327 } 1325 1328 1326 1329 const char *btf__name_by_offset(const struct btf *btf, __u32 offset) ··· 1385 1388 int btf__get_from_id(__u32 id, struct btf 
**btf) 1386 1389 { 1387 1390 struct btf *res; 1388 - int btf_fd; 1391 + int err, btf_fd; 1389 1392 1390 1393 *btf = NULL; 1391 1394 btf_fd = bpf_btf_get_fd_by_id(id); 1392 1395 if (btf_fd < 0) 1393 - return -errno; 1396 + return libbpf_err(-errno); 1394 1397 1395 1398 res = btf_get_from_fd(btf_fd, NULL); 1399 + err = libbpf_get_error(res); 1400 + 1396 1401 close(btf_fd); 1397 - if (IS_ERR(res)) 1398 - return PTR_ERR(res); 1402 + 1403 + if (err) 1404 + return libbpf_err(err); 1399 1405 1400 1406 *btf = res; 1401 1407 return 0; ··· 1415 1415 __s64 key_size, value_size; 1416 1416 __s32 container_id; 1417 1417 1418 - if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == 1419 - max_name) { 1418 + if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == max_name) { 1420 1419 pr_warn("map:%s length of '____btf_map_%s' is too long\n", 1421 1420 map_name, map_name); 1422 - return -EINVAL; 1421 + return libbpf_err(-EINVAL); 1423 1422 } 1424 1423 1425 1424 container_id = btf__find_by_name(btf, container_name); 1426 1425 if (container_id < 0) { 1427 1426 pr_debug("map:%s container_name:%s cannot be found in BTF. 
Missing BPF_ANNOTATE_KV_PAIR?\n", 1428 1427 map_name, container_name); 1429 - return container_id; 1428 + return libbpf_err(container_id); 1430 1429 } 1431 1430 1432 1431 container_type = btf__type_by_id(btf, container_id); 1433 1432 if (!container_type) { 1434 1433 pr_warn("map:%s cannot find BTF type for container_id:%u\n", 1435 1434 map_name, container_id); 1436 - return -EINVAL; 1435 + return libbpf_err(-EINVAL); 1437 1436 } 1438 1437 1439 1438 if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) { 1440 1439 pr_warn("map:%s container_name:%s is an invalid container struct\n", 1441 1440 map_name, container_name); 1442 - return -EINVAL; 1441 + return libbpf_err(-EINVAL); 1443 1442 } 1444 1443 1445 1444 key = btf_members(container_type); ··· 1447 1448 key_size = btf__resolve_size(btf, key->type); 1448 1449 if (key_size < 0) { 1449 1450 pr_warn("map:%s invalid BTF key_type_size\n", map_name); 1450 - return key_size; 1451 + return libbpf_err(key_size); 1451 1452 } 1452 1453 1453 1454 if (expected_key_size != key_size) { 1454 1455 pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", 1455 1456 map_name, (__u32)key_size, expected_key_size); 1456 - return -EINVAL; 1457 + return libbpf_err(-EINVAL); 1457 1458 } 1458 1459 1459 1460 value_size = btf__resolve_size(btf, value->type); 1460 1461 if (value_size < 0) { 1461 1462 pr_warn("map:%s invalid BTF value_type_size\n", map_name); 1462 - return value_size; 1463 + return libbpf_err(value_size); 1463 1464 } 1464 1465 1465 1466 if (expected_value_size != value_size) { 1466 1467 pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", 1467 1468 map_name, (__u32)value_size, expected_value_size); 1468 - return -EINVAL; 1469 + return libbpf_err(-EINVAL); 1469 1470 } 1470 1471 1471 1472 *key_type_id = key->type; ··· 1562 1563 1563 1564 /* BTF needs to be in a modifiable state to build string lookup index */ 1564 1565 if (btf_ensure_modifiable(btf)) 1565 - return -ENOMEM; 1566 + return 
libbpf_err(-ENOMEM); 1566 1567 1567 1568 off = strset__find_str(btf->strs_set, s); 1568 1569 if (off < 0) 1569 - return off; 1570 + return libbpf_err(off); 1570 1571 1571 1572 return btf->start_str_off + off; 1572 1573 } ··· 1587 1588 } 1588 1589 1589 1590 if (btf_ensure_modifiable(btf)) 1590 - return -ENOMEM; 1591 + return libbpf_err(-ENOMEM); 1591 1592 1592 1593 off = strset__add_str(btf->strs_set, s); 1593 1594 if (off < 0) 1594 - return off; 1595 + return libbpf_err(off); 1595 1596 1596 1597 btf->hdr->str_len = strset__data_size(btf->strs_set); 1597 1598 ··· 1615 1616 1616 1617 err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 1617 1618 if (err) 1618 - return err; 1619 + return libbpf_err(err); 1619 1620 1620 1621 btf->hdr->type_len += data_sz; 1621 1622 btf->hdr->str_off += data_sz; ··· 1652 1653 1653 1654 sz = btf_type_size(src_type); 1654 1655 if (sz < 0) 1655 - return sz; 1656 + return libbpf_err(sz); 1656 1657 1657 1658 /* deconstruct BTF, if necessary, and invalidate raw_data */ 1658 1659 if (btf_ensure_modifiable(btf)) 1659 - return -ENOMEM; 1660 + return libbpf_err(-ENOMEM); 1660 1661 1661 1662 t = btf_add_type_mem(btf, sz); 1662 1663 if (!t) 1663 - return -ENOMEM; 1664 + return libbpf_err(-ENOMEM); 1664 1665 1665 1666 memcpy(t, src_type, sz); 1666 1667 1667 1668 err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); 1668 1669 if (err) 1669 - return err; 1670 + return libbpf_err(err); 1670 1671 1671 1672 return btf_commit_type(btf, sz); 1672 1673 } ··· 1687 1688 1688 1689 /* non-empty name */ 1689 1690 if (!name || !name[0]) 1690 - return -EINVAL; 1691 + return libbpf_err(-EINVAL); 1691 1692 /* byte_sz must be power of 2 */ 1692 1693 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16) 1693 - return -EINVAL; 1694 + return libbpf_err(-EINVAL); 1694 1695 if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL)) 1695 - return -EINVAL; 1696 + return libbpf_err(-EINVAL); 1696 1697 1697 1698 /* deconstruct BTF, if necessary, and invalidate 
raw_data */ 1698 1699 if (btf_ensure_modifiable(btf)) 1699 - return -ENOMEM; 1700 + return libbpf_err(-ENOMEM); 1700 1701 1701 1702 sz = sizeof(struct btf_type) + sizeof(int); 1702 1703 t = btf_add_type_mem(btf, sz); 1703 1704 if (!t) 1704 - return -ENOMEM; 1705 + return libbpf_err(-ENOMEM); 1705 1706 1706 1707 /* if something goes wrong later, we might end up with an extra string, 1707 1708 * but that shouldn't be a problem, because BTF can't be constructed ··· 1735 1736 1736 1737 /* non-empty name */ 1737 1738 if (!name || !name[0]) 1738 - return -EINVAL; 1739 + return libbpf_err(-EINVAL); 1739 1740 1740 1741 /* byte_sz must be one of the explicitly allowed values */ 1741 1742 if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 && 1742 1743 byte_sz != 16) 1743 - return -EINVAL; 1744 + return libbpf_err(-EINVAL); 1744 1745 1745 1746 if (btf_ensure_modifiable(btf)) 1746 - return -ENOMEM; 1747 + return libbpf_err(-ENOMEM); 1747 1748 1748 1749 sz = sizeof(struct btf_type); 1749 1750 t = btf_add_type_mem(btf, sz); 1750 1751 if (!t) 1751 - return -ENOMEM; 1752 + return libbpf_err(-ENOMEM); 1752 1753 1753 1754 name_off = btf__add_str(btf, name); 1754 1755 if (name_off < 0) ··· 1779 1780 int sz, name_off = 0; 1780 1781 1781 1782 if (validate_type_id(ref_type_id)) 1782 - return -EINVAL; 1783 + return libbpf_err(-EINVAL); 1783 1784 1784 1785 if (btf_ensure_modifiable(btf)) 1785 - return -ENOMEM; 1786 + return libbpf_err(-ENOMEM); 1786 1787 1787 1788 sz = sizeof(struct btf_type); 1788 1789 t = btf_add_type_mem(btf, sz); 1789 1790 if (!t) 1790 - return -ENOMEM; 1791 + return libbpf_err(-ENOMEM); 1791 1792 1792 1793 if (name && name[0]) { 1793 1794 name_off = btf__add_str(btf, name); ··· 1830 1831 int sz; 1831 1832 1832 1833 if (validate_type_id(index_type_id) || validate_type_id(elem_type_id)) 1833 - return -EINVAL; 1834 + return libbpf_err(-EINVAL); 1834 1835 1835 1836 if (btf_ensure_modifiable(btf)) 1836 - return -ENOMEM; 1837 + return libbpf_err(-ENOMEM); 
1837 1838 1838 1839 sz = sizeof(struct btf_type) + sizeof(struct btf_array); 1839 1840 t = btf_add_type_mem(btf, sz); 1840 1841 if (!t) 1841 - return -ENOMEM; 1842 + return libbpf_err(-ENOMEM); 1842 1843 1843 1844 t->name_off = 0; 1844 1845 t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0); ··· 1859 1860 int sz, name_off = 0; 1860 1861 1861 1862 if (btf_ensure_modifiable(btf)) 1862 - return -ENOMEM; 1863 + return libbpf_err(-ENOMEM); 1863 1864 1864 1865 sz = sizeof(struct btf_type); 1865 1866 t = btf_add_type_mem(btf, sz); 1866 1867 if (!t) 1867 - return -ENOMEM; 1868 + return libbpf_err(-ENOMEM); 1868 1869 1869 1870 if (name && name[0]) { 1870 1871 name_off = btf__add_str(btf, name); ··· 1942 1943 1943 1944 /* last type should be union/struct */ 1944 1945 if (btf->nr_types == 0) 1945 - return -EINVAL; 1946 + return libbpf_err(-EINVAL); 1946 1947 t = btf_last_type(btf); 1947 1948 if (!btf_is_composite(t)) 1948 - return -EINVAL; 1949 + return libbpf_err(-EINVAL); 1949 1950 1950 1951 if (validate_type_id(type_id)) 1951 - return -EINVAL; 1952 + return libbpf_err(-EINVAL); 1952 1953 /* best-effort bit field offset/size enforcement */ 1953 1954 is_bitfield = bit_size || (bit_offset % 8 != 0); 1954 1955 if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff)) 1955 - return -EINVAL; 1956 + return libbpf_err(-EINVAL); 1956 1957 1957 1958 /* only offset 0 is allowed for unions */ 1958 1959 if (btf_is_union(t) && bit_offset) 1959 - return -EINVAL; 1960 + return libbpf_err(-EINVAL); 1960 1961 1961 1962 /* decompose and invalidate raw data */ 1962 1963 if (btf_ensure_modifiable(btf)) 1963 - return -ENOMEM; 1964 + return libbpf_err(-ENOMEM); 1964 1965 1965 1966 sz = sizeof(struct btf_member); 1966 1967 m = btf_add_type_mem(btf, sz); 1967 1968 if (!m) 1968 - return -ENOMEM; 1969 + return libbpf_err(-ENOMEM); 1969 1970 1970 1971 if (name && name[0]) { 1971 1972 name_off = btf__add_str(btf, name); ··· 2007 2008 2008 2009 /* byte_sz must be power of 2 */ 2009 2010 
if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8) 2010 - return -EINVAL; 2011 + return libbpf_err(-EINVAL); 2011 2012 2012 2013 if (btf_ensure_modifiable(btf)) 2013 - return -ENOMEM; 2014 + return libbpf_err(-ENOMEM); 2014 2015 2015 2016 sz = sizeof(struct btf_type); 2016 2017 t = btf_add_type_mem(btf, sz); 2017 2018 if (!t) 2018 - return -ENOMEM; 2019 + return libbpf_err(-ENOMEM); 2019 2020 2020 2021 if (name && name[0]) { 2021 2022 name_off = btf__add_str(btf, name); ··· 2047 2048 2048 2049 /* last type should be BTF_KIND_ENUM */ 2049 2050 if (btf->nr_types == 0) 2050 - return -EINVAL; 2051 + return libbpf_err(-EINVAL); 2051 2052 t = btf_last_type(btf); 2052 2053 if (!btf_is_enum(t)) 2053 - return -EINVAL; 2054 + return libbpf_err(-EINVAL); 2054 2055 2055 2056 /* non-empty name */ 2056 2057 if (!name || !name[0]) 2057 - return -EINVAL; 2058 + return libbpf_err(-EINVAL); 2058 2059 if (value < INT_MIN || value > UINT_MAX) 2059 - return -E2BIG; 2060 + return libbpf_err(-E2BIG); 2060 2061 2061 2062 /* decompose and invalidate raw data */ 2062 2063 if (btf_ensure_modifiable(btf)) 2063 - return -ENOMEM; 2064 + return libbpf_err(-ENOMEM); 2064 2065 2065 2066 sz = sizeof(struct btf_enum); 2066 2067 v = btf_add_type_mem(btf, sz); 2067 2068 if (!v) 2068 - return -ENOMEM; 2069 + return libbpf_err(-ENOMEM); 2069 2070 2070 2071 name_off = btf__add_str(btf, name); 2071 2072 if (name_off < 0) ··· 2095 2096 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind) 2096 2097 { 2097 2098 if (!name || !name[0]) 2098 - return -EINVAL; 2099 + return libbpf_err(-EINVAL); 2099 2100 2100 2101 switch (fwd_kind) { 2101 2102 case BTF_FWD_STRUCT: ··· 2116 2117 */ 2117 2118 return btf__add_enum(btf, name, sizeof(int)); 2118 2119 default: 2119 - return -EINVAL; 2120 + return libbpf_err(-EINVAL); 2120 2121 } 2121 2122 } 2122 2123 ··· 2131 2132 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id) 2132 2133 { 2133 2134 if (!name || !name[0]) 2134 
- return -EINVAL; 2135 + return libbpf_err(-EINVAL); 2135 2136 2136 2137 return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id); 2137 2138 } ··· 2186 2187 int id; 2187 2188 2188 2189 if (!name || !name[0]) 2189 - return -EINVAL; 2190 + return libbpf_err(-EINVAL); 2190 2191 if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL && 2191 2192 linkage != BTF_FUNC_EXTERN) 2192 - return -EINVAL; 2193 + return libbpf_err(-EINVAL); 2193 2194 2194 2195 id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id); 2195 2196 if (id > 0) { ··· 2197 2198 2198 2199 t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0); 2199 2200 } 2200 - return id; 2201 + return libbpf_err(id); 2201 2202 } 2202 2203 2203 2204 /* ··· 2218 2219 int sz; 2219 2220 2220 2221 if (validate_type_id(ret_type_id)) 2221 - return -EINVAL; 2222 + return libbpf_err(-EINVAL); 2222 2223 2223 2224 if (btf_ensure_modifiable(btf)) 2224 - return -ENOMEM; 2225 + return libbpf_err(-ENOMEM); 2225 2226 2226 2227 sz = sizeof(struct btf_type); 2227 2228 t = btf_add_type_mem(btf, sz); 2228 2229 if (!t) 2229 - return -ENOMEM; 2230 + return libbpf_err(-ENOMEM); 2230 2231 2231 2232 /* start out with vlen=0; this will be adjusted when adding enum 2232 2233 * values, if necessary ··· 2253 2254 int sz, name_off = 0; 2254 2255 2255 2256 if (validate_type_id(type_id)) 2256 - return -EINVAL; 2257 + return libbpf_err(-EINVAL); 2257 2258 2258 2259 /* last type should be BTF_KIND_FUNC_PROTO */ 2259 2260 if (btf->nr_types == 0) 2260 - return -EINVAL; 2261 + return libbpf_err(-EINVAL); 2261 2262 t = btf_last_type(btf); 2262 2263 if (!btf_is_func_proto(t)) 2263 - return -EINVAL; 2264 + return libbpf_err(-EINVAL); 2264 2265 2265 2266 /* decompose and invalidate raw data */ 2266 2267 if (btf_ensure_modifiable(btf)) 2267 - return -ENOMEM; 2268 + return libbpf_err(-ENOMEM); 2268 2269 2269 2270 sz = sizeof(struct btf_param); 2270 2271 p = btf_add_type_mem(btf, sz); 2271 2272 if (!p) 2272 - return -ENOMEM; 2273 + return 
libbpf_err(-ENOMEM); 2273 2274 2274 2275 if (name && name[0]) { 2275 2276 name_off = btf__add_str(btf, name); ··· 2307 2308 2308 2309 /* non-empty name */ 2309 2310 if (!name || !name[0]) 2310 - return -EINVAL; 2311 + return libbpf_err(-EINVAL); 2311 2312 if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED && 2312 2313 linkage != BTF_VAR_GLOBAL_EXTERN) 2313 - return -EINVAL; 2314 + return libbpf_err(-EINVAL); 2314 2315 if (validate_type_id(type_id)) 2315 - return -EINVAL; 2316 + return libbpf_err(-EINVAL); 2316 2317 2317 2318 /* deconstruct BTF, if necessary, and invalidate raw_data */ 2318 2319 if (btf_ensure_modifiable(btf)) 2319 - return -ENOMEM; 2320 + return libbpf_err(-ENOMEM); 2320 2321 2321 2322 sz = sizeof(struct btf_type) + sizeof(struct btf_var); 2322 2323 t = btf_add_type_mem(btf, sz); 2323 2324 if (!t) 2324 - return -ENOMEM; 2325 + return libbpf_err(-ENOMEM); 2325 2326 2326 2327 name_off = btf__add_str(btf, name); 2327 2328 if (name_off < 0) ··· 2356 2357 2357 2358 /* non-empty name */ 2358 2359 if (!name || !name[0]) 2359 - return -EINVAL; 2360 + return libbpf_err(-EINVAL); 2360 2361 2361 2362 if (btf_ensure_modifiable(btf)) 2362 - return -ENOMEM; 2363 + return libbpf_err(-ENOMEM); 2363 2364 2364 2365 sz = sizeof(struct btf_type); 2365 2366 t = btf_add_type_mem(btf, sz); 2366 2367 if (!t) 2367 - return -ENOMEM; 2368 + return libbpf_err(-ENOMEM); 2368 2369 2369 2370 name_off = btf__add_str(btf, name); 2370 2371 if (name_off < 0) ··· 2396 2397 2397 2398 /* last type should be BTF_KIND_DATASEC */ 2398 2399 if (btf->nr_types == 0) 2399 - return -EINVAL; 2400 + return libbpf_err(-EINVAL); 2400 2401 t = btf_last_type(btf); 2401 2402 if (!btf_is_datasec(t)) 2402 - return -EINVAL; 2403 + return libbpf_err(-EINVAL); 2403 2404 2404 2405 if (validate_type_id(var_type_id)) 2405 - return -EINVAL; 2406 + return libbpf_err(-EINVAL); 2406 2407 2407 2408 /* decompose and invalidate raw data */ 2408 2409 if (btf_ensure_modifiable(btf)) 2409 - return 
-ENOMEM; 2410 + return libbpf_err(-ENOMEM); 2410 2411 2411 2412 sz = sizeof(struct btf_var_secinfo); 2412 2413 v = btf_add_type_mem(btf, sz); 2413 2414 if (!v) 2414 - return -ENOMEM; 2415 + return libbpf_err(-ENOMEM); 2415 2416 2416 2417 v->type = var_type_id; 2417 2418 v->offset = offset; ··· 2613 2614 2614 2615 err = btf_ext_parse_hdr(data, size); 2615 2616 if (err) 2616 - return ERR_PTR(err); 2617 + return libbpf_err_ptr(err); 2617 2618 2618 2619 btf_ext = calloc(1, sizeof(struct btf_ext)); 2619 2620 if (!btf_ext) 2620 - return ERR_PTR(-ENOMEM); 2621 + return libbpf_err_ptr(-ENOMEM); 2621 2622 2622 2623 btf_ext->data_size = size; 2623 2624 btf_ext->data = malloc(size); ··· 2627 2628 } 2628 2629 memcpy(btf_ext->data, data, size); 2629 2630 2630 - if (btf_ext->hdr->hdr_len < 2631 - offsetofend(struct btf_ext_header, line_info_len)) 2631 + if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) { 2632 + err = -EINVAL; 2632 2633 goto done; 2634 + } 2635 + 2633 2636 err = btf_ext_setup_func_info(btf_ext); 2634 2637 if (err) 2635 2638 goto done; ··· 2640 2639 if (err) 2641 2640 goto done; 2642 2641 2643 - if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) 2642 + if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) { 2643 + err = -EINVAL; 2644 2644 goto done; 2645 + } 2646 + 2645 2647 err = btf_ext_setup_core_relos(btf_ext); 2646 2648 if (err) 2647 2649 goto done; ··· 2652 2648 done: 2653 2649 if (err) { 2654 2650 btf_ext__free(btf_ext); 2655 - return ERR_PTR(err); 2651 + return libbpf_err_ptr(err); 2656 2652 } 2657 2653 2658 2654 return btf_ext; ··· 2691 2687 existing_len = (*cnt) * record_size; 2692 2688 data = realloc(*info, existing_len + records_len); 2693 2689 if (!data) 2694 - return -ENOMEM; 2690 + return libbpf_err(-ENOMEM); 2695 2691 2696 2692 memcpy(data + existing_len, sinfo->data, records_len); 2697 2693 /* adjust insn_off only, the rest data will be passed ··· 2701 2697 __u32 
*insn_off; 2702 2698 2703 2699 insn_off = data + existing_len + (i * record_size); 2704 - *insn_off = *insn_off / sizeof(struct bpf_insn) + 2705 - insns_cnt; 2700 + *insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt; 2706 2701 } 2707 2702 *info = data; 2708 2703 *cnt += sinfo->num_info; 2709 2704 return 0; 2710 2705 } 2711 2706 2712 - return -ENOENT; 2707 + return libbpf_err(-ENOENT); 2713 2708 } 2714 2709 2715 2710 int btf_ext__reloc_func_info(const struct btf *btf, ··· 2897 2894 2898 2895 if (IS_ERR(d)) { 2899 2896 pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d)); 2900 - return -EINVAL; 2897 + return libbpf_err(-EINVAL); 2901 2898 } 2902 2899 2903 2900 if (btf_ensure_modifiable(btf)) 2904 - return -ENOMEM; 2901 + return libbpf_err(-ENOMEM); 2905 2902 2906 2903 err = btf_dedup_prep(d); 2907 2904 if (err) { ··· 2941 2938 2942 2939 done: 2943 2940 btf_dedup_free(d); 2944 - return err; 2941 + return libbpf_err(err); 2945 2942 } 2946 2943 2947 2944 #define BTF_UNPROCESSED_ID ((__u32)-1) ··· 4414 4411 char path[PATH_MAX + 1]; 4415 4412 struct utsname buf; 4416 4413 struct btf *btf; 4417 - int i; 4414 + int i, err; 4418 4415 4419 4416 uname(&buf); 4420 4417 ··· 4428 4425 btf = btf__parse_raw(path); 4429 4426 else 4430 4427 btf = btf__parse_elf(path, NULL); 4431 - 4432 - pr_debug("loading kernel BTF '%s': %ld\n", 4433 - path, IS_ERR(btf) ? PTR_ERR(btf) : 0); 4434 - if (IS_ERR(btf)) 4428 + err = libbpf_get_error(btf); 4429 + pr_debug("loading kernel BTF '%s': %d\n", path, err); 4430 + if (err) 4435 4431 continue; 4436 4432 4437 4433 return btf; 4438 4434 } 4439 4435 4440 4436 pr_warn("failed to find valid kernel BTF\n"); 4441 - return ERR_PTR(-ESRCH); 4437 + return libbpf_err_ptr(-ESRCH); 4442 4438 } 4443 4439 4444 4440 int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
+7 -7
tools/lib/bpf/btf_dump.c
··· 128 128 129 129 d = calloc(1, sizeof(struct btf_dump)); 130 130 if (!d) 131 - return ERR_PTR(-ENOMEM); 131 + return libbpf_err_ptr(-ENOMEM); 132 132 133 133 d->btf = btf; 134 134 d->btf_ext = btf_ext; ··· 156 156 return d; 157 157 err: 158 158 btf_dump__free(d); 159 - return ERR_PTR(err); 159 + return libbpf_err_ptr(err); 160 160 } 161 161 162 162 static int btf_dump_resize(struct btf_dump *d) ··· 236 236 int err, i; 237 237 238 238 if (id > btf__get_nr_types(d->btf)) 239 - return -EINVAL; 239 + return libbpf_err(-EINVAL); 240 240 241 241 err = btf_dump_resize(d); 242 242 if (err) 243 - return err; 243 + return libbpf_err(err); 244 244 245 245 d->emit_queue_cnt = 0; 246 246 err = btf_dump_order_type(d, id, false); 247 247 if (err < 0) 248 - return err; 248 + return libbpf_err(err); 249 249 250 250 for (i = 0; i < d->emit_queue_cnt; i++) 251 251 btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/); ··· 1075 1075 int lvl, err; 1076 1076 1077 1077 if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts)) 1078 - return -EINVAL; 1078 + return libbpf_err(-EINVAL); 1079 1079 1080 1080 err = btf_dump_resize(d); 1081 1081 if (err) 1082 - return -EINVAL; 1082 + return libbpf_err(err); 1083 1083 1084 1084 fname = OPTS_GET(opts, field_name, ""); 1085 1085 lvl = OPTS_GET(opts, indent_level, 0);
+261 -241
tools/lib/bpf/libbpf.c
··· 2579 2579 2580 2580 if (btf_data) { 2581 2581 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); 2582 - if (IS_ERR(obj->btf)) { 2583 - err = PTR_ERR(obj->btf); 2582 + err = libbpf_get_error(obj->btf); 2583 + if (err) { 2584 2584 obj->btf = NULL; 2585 - pr_warn("Error loading ELF section %s: %d.\n", 2586 - BTF_ELF_SEC, err); 2585 + pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err); 2587 2586 goto out; 2588 2587 } 2589 2588 /* enforce 8-byte pointers for BPF-targeted BTFs */ 2590 2589 btf__set_pointer_size(obj->btf, 8); 2591 - err = 0; 2592 2590 } 2593 2591 if (btf_ext_data) { 2594 2592 if (!obj->btf) { ··· 2594 2596 BTF_EXT_ELF_SEC, BTF_ELF_SEC); 2595 2597 goto out; 2596 2598 } 2597 - obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, 2598 - btf_ext_data->d_size); 2599 - if (IS_ERR(obj->btf_ext)) { 2600 - pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n", 2601 - BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); 2599 + obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); 2600 + err = libbpf_get_error(obj->btf_ext); 2601 + if (err) { 2602 + pr_warn("Error loading ELF section %s: %d. 
Ignored and continue.\n", 2603 + BTF_EXT_ELF_SEC, err); 2602 2604 obj->btf_ext = NULL; 2603 2605 goto out; 2604 2606 } ··· 2682 2684 return 0; 2683 2685 2684 2686 obj->btf_vmlinux = libbpf_find_kernel_btf(); 2685 - if (IS_ERR(obj->btf_vmlinux)) { 2686 - err = PTR_ERR(obj->btf_vmlinux); 2687 + err = libbpf_get_error(obj->btf_vmlinux); 2688 + if (err) { 2687 2689 pr_warn("Error loading vmlinux BTF: %d\n", err); 2688 2690 obj->btf_vmlinux = NULL; 2689 2691 return err; ··· 2749 2751 /* clone BTF to sanitize a copy and leave the original intact */ 2750 2752 raw_data = btf__get_raw_data(obj->btf, &sz); 2751 2753 kern_btf = btf__new(raw_data, sz); 2752 - if (IS_ERR(kern_btf)) 2753 - return PTR_ERR(kern_btf); 2754 + err = libbpf_get_error(kern_btf); 2755 + if (err) 2756 + return err; 2754 2757 2755 2758 /* enforce 8-byte pointers for BPF-targeted BTFs */ 2756 2759 btf__set_pointer_size(obj->btf, 8); ··· 3522 3523 if (pos->sec_name && !strcmp(pos->sec_name, title)) 3523 3524 return pos; 3524 3525 } 3525 - return NULL; 3526 + return errno = ENOENT, NULL; 3526 3527 } 3527 3528 3528 3529 static bool prog_is_subprog(const struct bpf_object *obj, ··· 3555 3556 if (!strcmp(prog->name, name)) 3556 3557 return prog; 3557 3558 } 3558 - return NULL; 3559 + return errno = ENOENT, NULL; 3559 3560 } 3560 3561 3561 3562 static bool bpf_object__shndx_is_data(const struct bpf_object *obj, ··· 3902 3903 3903 3904 err = bpf_obj_get_info_by_fd(fd, &info, &len); 3904 3905 if (err) 3905 - return err; 3906 + return libbpf_err(err); 3906 3907 3907 3908 new_name = strdup(info.name); 3908 3909 if (!new_name) 3909 - return -errno; 3910 + return libbpf_err(-errno); 3910 3911 3911 3912 new_fd = open("/", O_RDONLY | O_CLOEXEC); 3912 3913 if (new_fd < 0) { ··· 3944 3945 close(new_fd); 3945 3946 err_free_new_name: 3946 3947 free(new_name); 3947 - return err; 3948 + return libbpf_err(err); 3948 3949 } 3949 3950 3950 3951 __u32 bpf_map__max_entries(const struct bpf_map *map) ··· 3955 3956 struct bpf_map 
*bpf_map__inner_map(struct bpf_map *map) 3956 3957 { 3957 3958 if (!bpf_map_type__is_map_in_map(map->def.type)) 3958 - return NULL; 3959 + return errno = EINVAL, NULL; 3959 3960 3960 3961 return map->inner_map; 3961 3962 } ··· 3963 3964 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) 3964 3965 { 3965 3966 if (map->fd >= 0) 3966 - return -EBUSY; 3967 + return libbpf_err(-EBUSY); 3967 3968 map->def.max_entries = max_entries; 3968 3969 return 0; 3969 3970 } ··· 3971 3972 int bpf_map__resize(struct bpf_map *map, __u32 max_entries) 3972 3973 { 3973 3974 if (!map || !max_entries) 3974 - return -EINVAL; 3975 + return libbpf_err(-EINVAL); 3975 3976 3976 3977 return bpf_map__set_max_entries(map, max_entries); 3977 3978 } ··· 5102 5103 } 5103 5104 5104 5105 btf = btf_get_from_fd(fd, obj->btf_vmlinux); 5105 - if (IS_ERR(btf)) { 5106 - pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n", 5107 - name, id, PTR_ERR(btf)); 5108 - err = PTR_ERR(btf); 5106 + err = libbpf_get_error(btf); 5107 + if (err) { 5108 + pr_warn("failed to load module [%s]'s BTF object #%d: %d\n", 5109 + name, id, err); 5109 5110 goto err_out; 5110 5111 } 5111 5112 ··· 6365 6366 6366 6367 if (targ_btf_path) { 6367 6368 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); 6368 - if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) { 6369 - err = PTR_ERR(obj->btf_vmlinux_override); 6369 + err = libbpf_get_error(obj->btf_vmlinux_override); 6370 + if (err) { 6370 6371 pr_warn("failed to parse target BTF: %d\n", err); 6371 6372 return err; 6372 6373 } ··· 7423 7424 7424 7425 if (prog->obj->loaded) { 7425 7426 pr_warn("prog '%s': can't load after object was loaded\n", prog->name); 7426 - return -EINVAL; 7427 + return libbpf_err(-EINVAL); 7427 7428 } 7428 7429 7429 7430 if ((prog->type == BPF_PROG_TYPE_TRACING || ··· 7433 7434 7434 7435 err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id); 7435 7436 if (err) 7436 - return err; 7437 + return libbpf_err(err); 7437 7438 7438 
7439 prog->attach_btf_obj_fd = btf_obj_fd; 7439 7440 prog->attach_btf_id = btf_type_id; ··· 7443 7444 if (prog->preprocessor) { 7444 7445 pr_warn("Internal error: can't load program '%s'\n", 7445 7446 prog->name); 7446 - return -LIBBPF_ERRNO__INTERNAL; 7447 + return libbpf_err(-LIBBPF_ERRNO__INTERNAL); 7447 7448 } 7448 7449 7449 7450 prog->instances.fds = malloc(sizeof(int)); 7450 7451 if (!prog->instances.fds) { 7451 7452 pr_warn("Not enough memory for BPF fds\n"); 7452 - return -ENOMEM; 7453 + return libbpf_err(-ENOMEM); 7453 7454 } 7454 7455 prog->instances.nr = 1; 7455 7456 prog->instances.fds[0] = -1; ··· 7508 7509 pr_warn("failed to load program '%s'\n", prog->name); 7509 7510 zfree(&prog->insns); 7510 7511 prog->insns_cnt = 0; 7511 - return err; 7512 + return libbpf_err(err); 7512 7513 } 7513 7514 7514 7515 static int ··· 7641 7642 7642 7643 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) 7643 7644 { 7644 - return __bpf_object__open_xattr(attr, 0); 7645 + return libbpf_ptr(__bpf_object__open_xattr(attr, 0)); 7645 7646 } 7646 7647 7647 7648 struct bpf_object *bpf_object__open(const char *path) ··· 7651 7652 .prog_type = BPF_PROG_TYPE_UNSPEC, 7652 7653 }; 7653 7654 7654 - return bpf_object__open_xattr(&attr); 7655 + return libbpf_ptr(__bpf_object__open_xattr(&attr, 0)); 7655 7656 } 7656 7657 7657 7658 struct bpf_object * 7658 7659 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) 7659 7660 { 7660 7661 if (!path) 7661 - return ERR_PTR(-EINVAL); 7662 + return libbpf_err_ptr(-EINVAL); 7662 7663 7663 7664 pr_debug("loading %s\n", path); 7664 7665 7665 - return __bpf_object__open(path, NULL, 0, opts); 7666 + return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts)); 7666 7667 } 7667 7668 7668 7669 struct bpf_object * ··· 7670 7671 const struct bpf_object_open_opts *opts) 7671 7672 { 7672 7673 if (!obj_buf || obj_buf_sz == 0) 7673 - return ERR_PTR(-EINVAL); 7674 + return libbpf_err_ptr(-EINVAL); 7674 7675 
7675 - return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts); 7676 + return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts)); 7676 7677 } 7677 7678 7678 7679 struct bpf_object * ··· 7687 7688 7688 7689 /* returning NULL is wrong, but backwards-compatible */ 7689 7690 if (!obj_buf || obj_buf_sz == 0) 7690 - return NULL; 7691 + return errno = EINVAL, NULL; 7691 7692 7692 - return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts); 7693 + return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts)); 7693 7694 } 7694 7695 7695 7696 int bpf_object__unload(struct bpf_object *obj) ··· 7697 7698 size_t i; 7698 7699 7699 7700 if (!obj) 7700 - return -EINVAL; 7701 + return libbpf_err(-EINVAL); 7701 7702 7702 7703 for (i = 0; i < obj->nr_maps; i++) { 7703 7704 zclose(obj->maps[i].fd); ··· 8030 8031 int err, i; 8031 8032 8032 8033 if (!attr) 8033 - return -EINVAL; 8034 + return libbpf_err(-EINVAL); 8034 8035 obj = attr->obj; 8035 8036 if (!obj) 8036 - return -EINVAL; 8037 + return libbpf_err(-EINVAL); 8037 8038 8038 8039 if (obj->loaded) { 8039 8040 pr_warn("object '%s': load can't be attempted twice\n", obj->name); 8040 - return -EINVAL; 8041 + return libbpf_err(-EINVAL); 8041 8042 } 8042 8043 8043 8044 if (obj->gen_loader) ··· 8088 8089 8089 8090 bpf_object__unload(obj); 8090 8091 pr_warn("failed to load object '%s'\n", obj->path); 8091 - return err; 8092 + return libbpf_err(err); 8092 8093 } 8093 8094 8094 8095 int bpf_object__load(struct bpf_object *obj) ··· 8160 8161 8161 8162 err = make_parent_dir(path); 8162 8163 if (err) 8163 - return err; 8164 + return libbpf_err(err); 8164 8165 8165 8166 err = check_path(path); 8166 8167 if (err) 8167 - return err; 8168 + return libbpf_err(err); 8168 8169 8169 8170 if (prog == NULL) { 8170 8171 pr_warn("invalid program pointer\n"); 8171 - return -EINVAL; 8172 + return libbpf_err(-EINVAL); 8172 8173 } 8173 8174 8174 8175 if (instance < 0 || instance >= prog->instances.nr) { 8175 8176 pr_warn("invalid prog 
instance %d of prog %s (max %d)\n", 8176 8177 instance, prog->name, prog->instances.nr); 8177 - return -EINVAL; 8178 + return libbpf_err(-EINVAL); 8178 8179 } 8179 8180 8180 8181 if (bpf_obj_pin(prog->instances.fds[instance], path)) { 8181 8182 err = -errno; 8182 8183 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 8183 8184 pr_warn("failed to pin program: %s\n", cp); 8184 - return err; 8185 + return libbpf_err(err); 8185 8186 } 8186 8187 pr_debug("pinned program '%s'\n", path); 8187 8188 ··· 8195 8196 8196 8197 err = check_path(path); 8197 8198 if (err) 8198 - return err; 8199 + return libbpf_err(err); 8199 8200 8200 8201 if (prog == NULL) { 8201 8202 pr_warn("invalid program pointer\n"); 8202 - return -EINVAL; 8203 + return libbpf_err(-EINVAL); 8203 8204 } 8204 8205 8205 8206 if (instance < 0 || instance >= prog->instances.nr) { 8206 8207 pr_warn("invalid prog instance %d of prog %s (max %d)\n", 8207 8208 instance, prog->name, prog->instances.nr); 8208 - return -EINVAL; 8209 + return libbpf_err(-EINVAL); 8209 8210 } 8210 8211 8211 8212 err = unlink(path); 8212 8213 if (err != 0) 8213 - return -errno; 8214 + return libbpf_err(-errno); 8215 + 8214 8216 pr_debug("unpinned program '%s'\n", path); 8215 8217 8216 8218 return 0; ··· 8223 8223 8224 8224 err = make_parent_dir(path); 8225 8225 if (err) 8226 - return err; 8226 + return libbpf_err(err); 8227 8227 8228 8228 err = check_path(path); 8229 8229 if (err) 8230 - return err; 8230 + return libbpf_err(err); 8231 8231 8232 8232 if (prog == NULL) { 8233 8233 pr_warn("invalid program pointer\n"); 8234 - return -EINVAL; 8234 + return libbpf_err(-EINVAL); 8235 8235 } 8236 8236 8237 8237 if (prog->instances.nr <= 0) { 8238 8238 pr_warn("no instances of prog %s to pin\n", prog->name); 8239 - return -EINVAL; 8239 + return libbpf_err(-EINVAL); 8240 8240 } 8241 8241 8242 8242 if (prog->instances.nr == 1) { ··· 8280 8280 8281 8281 rmdir(path); 8282 8282 8283 - return err; 8283 + return libbpf_err(err); 8284 8284 } 8285 8285 
8286 8286 int bpf_program__unpin(struct bpf_program *prog, const char *path) ··· 8289 8289 8290 8290 err = check_path(path); 8291 8291 if (err) 8292 - return err; 8292 + return libbpf_err(err); 8293 8293 8294 8294 if (prog == NULL) { 8295 8295 pr_warn("invalid program pointer\n"); 8296 - return -EINVAL; 8296 + return libbpf_err(-EINVAL); 8297 8297 } 8298 8298 8299 8299 if (prog->instances.nr <= 0) { 8300 8300 pr_warn("no instances of prog %s to pin\n", prog->name); 8301 - return -EINVAL; 8301 + return libbpf_err(-EINVAL); 8302 8302 } 8303 8303 8304 8304 if (prog->instances.nr == 1) { ··· 8312 8312 8313 8313 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 8314 8314 if (len < 0) 8315 - return -EINVAL; 8315 + return libbpf_err(-EINVAL); 8316 8316 else if (len >= PATH_MAX) 8317 - return -ENAMETOOLONG; 8317 + return libbpf_err(-ENAMETOOLONG); 8318 8318 8319 8319 err = bpf_program__unpin_instance(prog, buf, i); 8320 8320 if (err) ··· 8323 8323 8324 8324 err = rmdir(path); 8325 8325 if (err) 8326 - return -errno; 8326 + return libbpf_err(-errno); 8327 8327 8328 8328 return 0; 8329 8329 } ··· 8335 8335 8336 8336 if (map == NULL) { 8337 8337 pr_warn("invalid map pointer\n"); 8338 - return -EINVAL; 8338 + return libbpf_err(-EINVAL); 8339 8339 } 8340 8340 8341 8341 if (map->pin_path) { 8342 8342 if (path && strcmp(path, map->pin_path)) { 8343 8343 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", 8344 8344 bpf_map__name(map), map->pin_path, path); 8345 - return -EINVAL; 8345 + return libbpf_err(-EINVAL); 8346 8346 } else if (map->pinned) { 8347 8347 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", 8348 8348 bpf_map__name(map), map->pin_path); ··· 8352 8352 if (!path) { 8353 8353 pr_warn("missing a path to pin map '%s' at\n", 8354 8354 bpf_map__name(map)); 8355 - return -EINVAL; 8355 + return libbpf_err(-EINVAL); 8356 8356 } else if (map->pinned) { 8357 8357 pr_warn("map '%s' already pinned\n", bpf_map__name(map)); 8358 - return -EEXIST; 8358 + 
return libbpf_err(-EEXIST); 8359 8359 } 8360 8360 8361 8361 map->pin_path = strdup(path); ··· 8367 8367 8368 8368 err = make_parent_dir(map->pin_path); 8369 8369 if (err) 8370 - return err; 8370 + return libbpf_err(err); 8371 8371 8372 8372 err = check_path(map->pin_path); 8373 8373 if (err) 8374 - return err; 8374 + return libbpf_err(err); 8375 8375 8376 8376 if (bpf_obj_pin(map->fd, map->pin_path)) { 8377 8377 err = -errno; ··· 8386 8386 out_err: 8387 8387 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 8388 8388 pr_warn("failed to pin map: %s\n", cp); 8389 - return err; 8389 + return libbpf_err(err); 8390 8390 } 8391 8391 8392 8392 int bpf_map__unpin(struct bpf_map *map, const char *path) ··· 8395 8395 8396 8396 if (map == NULL) { 8397 8397 pr_warn("invalid map pointer\n"); 8398 - return -EINVAL; 8398 + return libbpf_err(-EINVAL); 8399 8399 } 8400 8400 8401 8401 if (map->pin_path) { 8402 8402 if (path && strcmp(path, map->pin_path)) { 8403 8403 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", 8404 8404 bpf_map__name(map), map->pin_path, path); 8405 - return -EINVAL; 8405 + return libbpf_err(-EINVAL); 8406 8406 } 8407 8407 path = map->pin_path; 8408 8408 } else if (!path) { 8409 8409 pr_warn("no path to unpin map '%s' from\n", 8410 8410 bpf_map__name(map)); 8411 - return -EINVAL; 8411 + return libbpf_err(-EINVAL); 8412 8412 } 8413 8413 8414 8414 err = check_path(path); 8415 8415 if (err) 8416 - return err; 8416 + return libbpf_err(err); 8417 8417 8418 8418 err = unlink(path); 8419 8419 if (err != 0) 8420 - return -errno; 8420 + return libbpf_err(-errno); 8421 8421 8422 8422 map->pinned = false; 8423 8423 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); ··· 8432 8432 if (path) { 8433 8433 new = strdup(path); 8434 8434 if (!new) 8435 - return -errno; 8435 + return libbpf_err(-errno); 8436 8436 } 8437 8437 8438 8438 free(map->pin_path); ··· 8466 8466 int err; 8467 8467 8468 8468 if (!obj) 8469 - return -ENOENT; 8469 + 
return libbpf_err(-ENOENT); 8470 8470 8471 8471 if (!obj->loaded) { 8472 8472 pr_warn("object not yet loaded; load it first\n"); 8473 - return -ENOENT; 8473 + return libbpf_err(-ENOENT); 8474 8474 } 8475 8475 8476 8476 bpf_object__for_each_map(map, obj) { ··· 8510 8510 bpf_map__unpin(map, NULL); 8511 8511 } 8512 8512 8513 - return err; 8513 + return libbpf_err(err); 8514 8514 } 8515 8515 8516 8516 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) ··· 8519 8519 int err; 8520 8520 8521 8521 if (!obj) 8522 - return -ENOENT; 8522 + return libbpf_err(-ENOENT); 8523 8523 8524 8524 bpf_object__for_each_map(map, obj) { 8525 8525 char *pin_path = NULL; ··· 8531 8531 len = snprintf(buf, PATH_MAX, "%s/%s", path, 8532 8532 bpf_map__name(map)); 8533 8533 if (len < 0) 8534 - return -EINVAL; 8534 + return libbpf_err(-EINVAL); 8535 8535 else if (len >= PATH_MAX) 8536 - return -ENAMETOOLONG; 8536 + return libbpf_err(-ENAMETOOLONG); 8537 8537 sanitize_pin_path(buf); 8538 8538 pin_path = buf; 8539 8539 } else if (!map->pin_path) { ··· 8542 8542 8543 8543 err = bpf_map__unpin(map, pin_path); 8544 8544 if (err) 8545 - return err; 8545 + return libbpf_err(err); 8546 8546 } 8547 8547 8548 8548 return 0; ··· 8554 8554 int err; 8555 8555 8556 8556 if (!obj) 8557 - return -ENOENT; 8557 + return libbpf_err(-ENOENT); 8558 8558 8559 8559 if (!obj->loaded) { 8560 8560 pr_warn("object not yet loaded; load it first\n"); 8561 - return -ENOENT; 8561 + return libbpf_err(-ENOENT); 8562 8562 } 8563 8563 8564 8564 bpf_object__for_each_program(prog, obj) { ··· 8597 8597 bpf_program__unpin(prog, buf); 8598 8598 } 8599 8599 8600 - return err; 8600 + return libbpf_err(err); 8601 8601 } 8602 8602 8603 8603 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) ··· 8606 8606 int err; 8607 8607 8608 8608 if (!obj) 8609 - return -ENOENT; 8609 + return libbpf_err(-ENOENT); 8610 8610 8611 8611 bpf_object__for_each_program(prog, obj) { 8612 8612 char buf[PATH_MAX]; ··· 8615 
8615 len = snprintf(buf, PATH_MAX, "%s/%s", path, 8616 8616 prog->pin_name); 8617 8617 if (len < 0) 8618 - return -EINVAL; 8618 + return libbpf_err(-EINVAL); 8619 8619 else if (len >= PATH_MAX) 8620 - return -ENAMETOOLONG; 8620 + return libbpf_err(-ENAMETOOLONG); 8621 8621 8622 8622 err = bpf_program__unpin(prog, buf); 8623 8623 if (err) 8624 - return err; 8624 + return libbpf_err(err); 8625 8625 } 8626 8626 8627 8627 return 0; ··· 8633 8633 8634 8634 err = bpf_object__pin_maps(obj, path); 8635 8635 if (err) 8636 - return err; 8636 + return libbpf_err(err); 8637 8637 8638 8638 err = bpf_object__pin_programs(obj, path); 8639 8639 if (err) { 8640 8640 bpf_object__unpin_maps(obj, path); 8641 - return err; 8641 + return libbpf_err(err); 8642 8642 } 8643 8643 8644 8644 return 0; ··· 8735 8735 8736 8736 const char *bpf_object__name(const struct bpf_object *obj) 8737 8737 { 8738 - return obj ? obj->name : ERR_PTR(-EINVAL); 8738 + return obj ? obj->name : libbpf_err_ptr(-EINVAL); 8739 8739 } 8740 8740 8741 8741 unsigned int bpf_object__kversion(const struct bpf_object *obj) ··· 8756 8756 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version) 8757 8757 { 8758 8758 if (obj->loaded) 8759 - return -EINVAL; 8759 + return libbpf_err(-EINVAL); 8760 8760 8761 8761 obj->kern_version = kern_version; 8762 8762 ··· 8776 8776 8777 8777 void *bpf_object__priv(const struct bpf_object *obj) 8778 8778 { 8779 - return obj ? obj->priv : ERR_PTR(-EINVAL); 8779 + return obj ? obj->priv : libbpf_err_ptr(-EINVAL); 8780 8780 } 8781 8781 8782 8782 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) ··· 8812 8812 8813 8813 if (p->obj != obj) { 8814 8814 pr_warn("error: program handler doesn't match object\n"); 8815 - return NULL; 8815 + return errno = EINVAL, NULL; 8816 8816 } 8817 8817 8818 8818 idx = (p - obj->programs) + (forward ? 
1 : -1); ··· 8858 8858 8859 8859 void *bpf_program__priv(const struct bpf_program *prog) 8860 8860 { 8861 - return prog ? prog->priv : ERR_PTR(-EINVAL); 8861 + return prog ? prog->priv : libbpf_err_ptr(-EINVAL); 8862 8862 } 8863 8863 8864 8864 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) ··· 8885 8885 title = strdup(title); 8886 8886 if (!title) { 8887 8887 pr_warn("failed to strdup program title\n"); 8888 - return ERR_PTR(-ENOMEM); 8888 + return libbpf_err_ptr(-ENOMEM); 8889 8889 } 8890 8890 } 8891 8891 ··· 8900 8900 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) 8901 8901 { 8902 8902 if (prog->obj->loaded) 8903 - return -EINVAL; 8903 + return libbpf_err(-EINVAL); 8904 8904 8905 8905 prog->load = autoload; 8906 8906 return 0; ··· 8922 8922 int *instances_fds; 8923 8923 8924 8924 if (nr_instances <= 0 || !prep) 8925 - return -EINVAL; 8925 + return libbpf_err(-EINVAL); 8926 8926 8927 8927 if (prog->instances.nr > 0 || prog->instances.fds) { 8928 8928 pr_warn("Can't set pre-processor after loading\n"); 8929 - return -EINVAL; 8929 + return libbpf_err(-EINVAL); 8930 8930 } 8931 8931 8932 8932 instances_fds = malloc(sizeof(int) * nr_instances); 8933 8933 if (!instances_fds) { 8934 8934 pr_warn("alloc memory failed for fds\n"); 8935 - return -ENOMEM; 8935 + return libbpf_err(-ENOMEM); 8936 8936 } 8937 8937 8938 8938 /* fill all fd with -1 */ ··· 8949 8949 int fd; 8950 8950 8951 8951 if (!prog) 8952 - return -EINVAL; 8952 + return libbpf_err(-EINVAL); 8953 8953 8954 8954 if (n >= prog->instances.nr || n < 0) { 8955 8955 pr_warn("Can't get the %dth fd from program %s: only %d instances\n", 8956 8956 n, prog->name, prog->instances.nr); 8957 - return -EINVAL; 8957 + return libbpf_err(-EINVAL); 8958 8958 } 8959 8959 8960 8960 fd = prog->instances.fds[n]; 8961 8961 if (fd < 0) { 8962 8962 pr_warn("%dth instance of program '%s' is invalid\n", 8963 8963 n, prog->name); 8964 - return -ENOENT; 8964 + return libbpf_err(-ENOENT); 
8965 8965 } 8966 8966 8967 8967 return fd; ··· 8987 8987 int bpf_program__set_##NAME(struct bpf_program *prog) \ 8988 8988 { \ 8989 8989 if (!prog) \ 8990 - return -EINVAL; \ 8990 + return libbpf_err(-EINVAL); \ 8991 8991 bpf_program__set_type(prog, TYPE); \ 8992 8992 return 0; \ 8993 8993 } \ ··· 9274 9274 char *type_names; 9275 9275 9276 9276 if (!name) 9277 - return -EINVAL; 9277 + return libbpf_err(-EINVAL); 9278 9278 9279 9279 sec_def = find_sec_def(name); 9280 9280 if (sec_def) { ··· 9290 9290 free(type_names); 9291 9291 } 9292 9292 9293 - return -ESRCH; 9293 + return libbpf_err(-ESRCH); 9294 9294 } 9295 9295 9296 9296 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, ··· 9488 9488 int err; 9489 9489 9490 9490 btf = libbpf_find_kernel_btf(); 9491 - if (IS_ERR(btf)) { 9491 + err = libbpf_get_error(btf); 9492 + if (err) { 9492 9493 pr_warn("vmlinux BTF is not found\n"); 9493 - return -EINVAL; 9494 + return libbpf_err(err); 9494 9495 } 9495 9496 9496 9497 err = find_attach_btf_id(btf, name, attach_type); ··· 9499 9498 pr_warn("%s is not found in vmlinux BTF\n", name); 9500 9499 9501 9500 btf__free(btf); 9502 - return err; 9501 + return libbpf_err(err); 9503 9502 } 9504 9503 9505 9504 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) ··· 9510 9509 int err = -EINVAL; 9511 9510 9512 9511 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0); 9513 - if (IS_ERR_OR_NULL(info_linear)) { 9512 + err = libbpf_get_error(info_linear); 9513 + if (err) { 9514 9514 pr_warn("failed get_prog_info_linear for FD %d\n", 9515 9515 attach_prog_fd); 9516 - return -EINVAL; 9516 + return err; 9517 9517 } 9518 9518 info = &info_linear->info; 9519 9519 if (!info->btf_id) { ··· 9635 9633 int i; 9636 9634 9637 9635 if (!name) 9638 - return -EINVAL; 9636 + return libbpf_err(-EINVAL); 9639 9637 9640 9638 for (i = 0; i < ARRAY_SIZE(section_defs); i++) { 9641 9639 if (strncmp(name, section_defs[i].sec, section_defs[i].len)) 9642 
9640 continue; 9643 9641 if (!section_defs[i].is_attachable) 9644 - return -EINVAL; 9642 + return libbpf_err(-EINVAL); 9645 9643 *attach_type = section_defs[i].expected_attach_type; 9646 9644 return 0; 9647 9645 } ··· 9652 9650 free(type_names); 9653 9651 } 9654 9652 9655 - return -EINVAL; 9653 + return libbpf_err(-EINVAL); 9656 9654 } 9657 9655 9658 9656 int bpf_map__fd(const struct bpf_map *map) 9659 9657 { 9660 - return map ? map->fd : -EINVAL; 9658 + return map ? map->fd : libbpf_err(-EINVAL); 9661 9659 } 9662 9660 9663 9661 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map) 9664 9662 { 9665 - return map ? &map->def : ERR_PTR(-EINVAL); 9663 + return map ? &map->def : libbpf_err_ptr(-EINVAL); 9666 9664 } 9667 9665 9668 9666 const char *bpf_map__name(const struct bpf_map *map) ··· 9678 9676 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) 9679 9677 { 9680 9678 if (map->fd >= 0) 9681 - return -EBUSY; 9679 + return libbpf_err(-EBUSY); 9682 9680 map->def.type = type; 9683 9681 return 0; 9684 9682 } ··· 9691 9689 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) 9692 9690 { 9693 9691 if (map->fd >= 0) 9694 - return -EBUSY; 9692 + return libbpf_err(-EBUSY); 9695 9693 map->def.map_flags = flags; 9696 9694 return 0; 9697 9695 } ··· 9704 9702 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) 9705 9703 { 9706 9704 if (map->fd >= 0) 9707 - return -EBUSY; 9705 + return libbpf_err(-EBUSY); 9708 9706 map->numa_node = numa_node; 9709 9707 return 0; 9710 9708 } ··· 9717 9715 int bpf_map__set_key_size(struct bpf_map *map, __u32 size) 9718 9716 { 9719 9717 if (map->fd >= 0) 9720 - return -EBUSY; 9718 + return libbpf_err(-EBUSY); 9721 9719 map->def.key_size = size; 9722 9720 return 0; 9723 9721 } ··· 9730 9728 int bpf_map__set_value_size(struct bpf_map *map, __u32 size) 9731 9729 { 9732 9730 if (map->fd >= 0) 9733 - return -EBUSY; 9731 + return libbpf_err(-EBUSY); 9734 9732 map->def.value_size = size; 9735 9733 return 0; 
9736 9734 } ··· 9749 9747 bpf_map_clear_priv_t clear_priv) 9750 9748 { 9751 9749 if (!map) 9752 - return -EINVAL; 9750 + return libbpf_err(-EINVAL); 9753 9751 9754 9752 if (map->priv) { 9755 9753 if (map->clear_priv) ··· 9763 9761 9764 9762 void *bpf_map__priv(const struct bpf_map *map) 9765 9763 { 9766 - return map ? map->priv : ERR_PTR(-EINVAL); 9764 + return map ? map->priv : libbpf_err_ptr(-EINVAL); 9767 9765 } 9768 9766 9769 9767 int bpf_map__set_initial_value(struct bpf_map *map, ··· 9771 9769 { 9772 9770 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || 9773 9771 size != map->def.value_size || map->fd >= 0) 9774 - return -EINVAL; 9772 + return libbpf_err(-EINVAL); 9775 9773 9776 9774 memcpy(map->mmaped, data, size); 9777 9775 return 0; ··· 9803 9801 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) 9804 9802 { 9805 9803 if (map->fd >= 0) 9806 - return -EBUSY; 9804 + return libbpf_err(-EBUSY); 9807 9805 map->map_ifindex = ifindex; 9808 9806 return 0; 9809 9807 } ··· 9812 9810 { 9813 9811 if (!bpf_map_type__is_map_in_map(map->def.type)) { 9814 9812 pr_warn("error: unsupported map type\n"); 9815 - return -EINVAL; 9813 + return libbpf_err(-EINVAL); 9816 9814 } 9817 9815 if (map->inner_map_fd != -1) { 9818 9816 pr_warn("error: inner_map_fd already specified\n"); 9819 - return -EINVAL; 9817 + return libbpf_err(-EINVAL); 9820 9818 } 9821 9819 zfree(&map->inner_map); 9822 9820 map->inner_map_fd = fd; ··· 9830 9828 struct bpf_map *s, *e; 9831 9829 9832 9830 if (!obj || !obj->maps) 9833 - return NULL; 9831 + return errno = EINVAL, NULL; 9834 9832 9835 9833 s = obj->maps; 9836 9834 e = obj->maps + obj->nr_maps; ··· 9838 9836 if ((m < s) || (m >= e)) { 9839 9837 pr_warn("error in %s: map handler doesn't belong to object\n", 9840 9838 __func__); 9841 - return NULL; 9839 + return errno = EINVAL, NULL; 9842 9840 } 9843 9841 9844 9842 idx = (m - obj->maps) + i; ··· 9877 9875 if (pos->name && !strcmp(pos->name, name)) 9878 9876 return pos; 9879 9877 } 
9880 - return NULL; 9878 + return errno = ENOENT, NULL; 9881 9879 } 9882 9880 9883 9881 int ··· 9889 9887 struct bpf_map * 9890 9888 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset) 9891 9889 { 9892 - return ERR_PTR(-ENOTSUP); 9890 + return libbpf_err_ptr(-ENOTSUP); 9893 9891 } 9894 9892 9895 9893 long libbpf_get_error(const void *ptr) 9896 9894 { 9897 - return PTR_ERR_OR_ZERO(ptr); 9895 + if (!IS_ERR_OR_NULL(ptr)) 9896 + return 0; 9897 + 9898 + if (IS_ERR(ptr)) 9899 + errno = -PTR_ERR(ptr); 9900 + 9901 + /* If ptr == NULL, then errno should be already set by the failing 9902 + * API, because libbpf never returns NULL on success and it now always 9903 + * sets errno on error. So no extra errno handling for ptr == NULL 9904 + * case. 9905 + */ 9906 + return -errno; 9898 9907 } 9899 9908 9900 9909 int bpf_prog_load(const char *file, enum bpf_prog_type type, ··· 9931 9918 int err; 9932 9919 9933 9920 if (!attr) 9934 - return -EINVAL; 9921 + return libbpf_err(-EINVAL); 9935 9922 if (!attr->file) 9936 - return -EINVAL; 9923 + return libbpf_err(-EINVAL); 9937 9924 9938 9925 open_attr.file = attr->file; 9939 9926 open_attr.prog_type = attr->prog_type; 9940 9927 9941 9928 obj = bpf_object__open_xattr(&open_attr); 9942 - if (IS_ERR_OR_NULL(obj)) 9943 - return -ENOENT; 9929 + err = libbpf_get_error(obj); 9930 + if (err) 9931 + return libbpf_err(-ENOENT); 9944 9932 9945 9933 bpf_object__for_each_program(prog, obj) { 9946 9934 enum bpf_attach_type attach_type = attr->expected_attach_type; ··· 9961 9947 * didn't provide a fallback type, too bad... 
9962 9948 */ 9963 9949 bpf_object__close(obj); 9964 - return -EINVAL; 9950 + return libbpf_err(-EINVAL); 9965 9951 } 9966 9952 9967 9953 prog->prog_ifindex = attr->ifindex; ··· 9979 9965 if (!first_prog) { 9980 9966 pr_warn("object file doesn't contain bpf program\n"); 9981 9967 bpf_object__close(obj); 9982 - return -ENOENT; 9968 + return libbpf_err(-ENOENT); 9983 9969 } 9984 9970 9985 9971 err = bpf_object__load(obj); 9986 9972 if (err) { 9987 9973 bpf_object__close(obj); 9988 - return err; 9974 + return libbpf_err(err); 9989 9975 } 9990 9976 9991 9977 *pobj = obj; ··· 10004 9990 /* Replace link's underlying BPF program with the new one */ 10005 9991 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) 10006 9992 { 10007 - return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL); 9993 + int ret; 9994 + 9995 + ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL); 9996 + return libbpf_err_errno(ret); 10008 9997 } 10009 9998 10010 9999 /* Release "ownership" of underlying BPF resource (typically, BPF program ··· 10040 10023 free(link->pin_path); 10041 10024 free(link); 10042 10025 10043 - return err; 10026 + return libbpf_err(err); 10044 10027 } 10045 10028 10046 10029 int bpf_link__fd(const struct bpf_link *link) ··· 10055 10038 10056 10039 static int bpf_link__detach_fd(struct bpf_link *link) 10057 10040 { 10058 - return close(link->fd); 10041 + return libbpf_err_errno(close(link->fd)); 10059 10042 } 10060 10043 10061 10044 struct bpf_link *bpf_link__open(const char *path) ··· 10067 10050 if (fd < 0) { 10068 10051 fd = -errno; 10069 10052 pr_warn("failed to open link at %s: %d\n", path, fd); 10070 - return ERR_PTR(fd); 10053 + return libbpf_err_ptr(fd); 10071 10054 } 10072 10055 10073 10056 link = calloc(1, sizeof(*link)); 10074 10057 if (!link) { 10075 10058 close(fd); 10076 - return ERR_PTR(-ENOMEM); 10059 + return libbpf_err_ptr(-ENOMEM); 10077 10060 } 10078 10061 link->detach = &bpf_link__detach_fd; 
10079 10062 link->fd = fd; ··· 10081 10064 link->pin_path = strdup(path); 10082 10065 if (!link->pin_path) { 10083 10066 bpf_link__destroy(link); 10084 - return ERR_PTR(-ENOMEM); 10067 + return libbpf_err_ptr(-ENOMEM); 10085 10068 } 10086 10069 10087 10070 return link; ··· 10097 10080 int err; 10098 10081 10099 10082 if (link->pin_path) 10100 - return -EBUSY; 10083 + return libbpf_err(-EBUSY); 10101 10084 err = make_parent_dir(path); 10102 10085 if (err) 10103 - return err; 10086 + return libbpf_err(err); 10104 10087 err = check_path(path); 10105 10088 if (err) 10106 - return err; 10089 + return libbpf_err(err); 10107 10090 10108 10091 link->pin_path = strdup(path); 10109 10092 if (!link->pin_path) 10110 - return -ENOMEM; 10093 + return libbpf_err(-ENOMEM); 10111 10094 10112 10095 if (bpf_obj_pin(link->fd, link->pin_path)) { 10113 10096 err = -errno; 10114 10097 zfree(&link->pin_path); 10115 - return err; 10098 + return libbpf_err(err); 10116 10099 } 10117 10100 10118 10101 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); ··· 10124 10107 int err; 10125 10108 10126 10109 if (!link->pin_path) 10127 - return -EINVAL; 10110 + return libbpf_err(-EINVAL); 10128 10111 10129 10112 err = unlink(link->pin_path); 10130 10113 if (err != 0) 10131 - return -errno; 10114 + return libbpf_err_errno(err); 10132 10115 10133 10116 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); 10134 10117 zfree(&link->pin_path); ··· 10144 10127 err = -errno; 10145 10128 10146 10129 close(link->fd); 10147 - return err; 10130 + return libbpf_err(err); 10148 10131 } 10149 10132 10150 - struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, 10151 - int pfd) 10133 + struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd) 10152 10134 { 10153 10135 char errmsg[STRERR_BUFSIZE]; 10154 10136 struct bpf_link *link; ··· 10156 10140 if (pfd < 0) { 10157 10141 pr_warn("prog '%s': invalid perf event FD %d\n", 10158 10142 
prog->name, pfd); 10159 - return ERR_PTR(-EINVAL); 10143 + return libbpf_err_ptr(-EINVAL); 10160 10144 } 10161 10145 prog_fd = bpf_program__fd(prog); 10162 10146 if (prog_fd < 0) { 10163 10147 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n", 10164 10148 prog->name); 10165 - return ERR_PTR(-EINVAL); 10149 + return libbpf_err_ptr(-EINVAL); 10166 10150 } 10167 10151 10168 10152 link = calloc(1, sizeof(*link)); 10169 10153 if (!link) 10170 - return ERR_PTR(-ENOMEM); 10154 + return libbpf_err_ptr(-ENOMEM); 10171 10155 link->detach = &bpf_link__detach_perf_event; 10172 10156 link->fd = pfd; 10173 10157 ··· 10179 10163 if (err == -EPROTO) 10180 10164 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", 10181 10165 prog->name, pfd); 10182 - return ERR_PTR(err); 10166 + return libbpf_err_ptr(err); 10183 10167 } 10184 10168 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { 10185 10169 err = -errno; 10186 10170 free(link); 10187 10171 pr_warn("prog '%s': failed to enable pfd %d: %s\n", 10188 10172 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10189 - return ERR_PTR(err); 10173 + return libbpf_err_ptr(err); 10190 10174 } 10191 10175 return link; 10192 10176 } ··· 10310 10294 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n", 10311 10295 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, 10312 10296 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 10313 - return ERR_PTR(pfd); 10297 + return libbpf_err_ptr(pfd); 10314 10298 } 10315 10299 link = bpf_program__attach_perf_event(prog, pfd); 10316 - if (IS_ERR(link)) { 10300 + err = libbpf_get_error(link); 10301 + if (err) { 10317 10302 close(pfd); 10318 - err = PTR_ERR(link); 10319 10303 pr_warn("prog '%s': failed to attach to %s '%s': %s\n", 10320 10304 prog->name, retprobe ? 
"kretprobe" : "kprobe", func_name, 10321 10305 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10322 - return link; 10306 + return libbpf_err_ptr(err); 10323 10307 } 10324 10308 return link; 10325 10309 } ··· 10352 10336 prog->name, retprobe ? "uretprobe" : "uprobe", 10353 10337 binary_path, func_offset, 10354 10338 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 10355 - return ERR_PTR(pfd); 10339 + return libbpf_err_ptr(pfd); 10356 10340 } 10357 10341 link = bpf_program__attach_perf_event(prog, pfd); 10358 - if (IS_ERR(link)) { 10342 + err = libbpf_get_error(link); 10343 + if (err) { 10359 10344 close(pfd); 10360 - err = PTR_ERR(link); 10361 10345 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n", 10362 10346 prog->name, retprobe ? "uretprobe" : "uprobe", 10363 10347 binary_path, func_offset, 10364 10348 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10365 - return link; 10349 + return libbpf_err_ptr(err); 10366 10350 } 10367 10351 return link; 10368 10352 } ··· 10430 10414 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", 10431 10415 prog->name, tp_category, tp_name, 10432 10416 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 10433 - return ERR_PTR(pfd); 10417 + return libbpf_err_ptr(pfd); 10434 10418 } 10435 10419 link = bpf_program__attach_perf_event(prog, pfd); 10436 - if (IS_ERR(link)) { 10420 + err = libbpf_get_error(link); 10421 + if (err) { 10437 10422 close(pfd); 10438 - err = PTR_ERR(link); 10439 10423 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", 10440 10424 prog->name, tp_category, tp_name, 10441 10425 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10442 - return link; 10426 + return libbpf_err_ptr(err); 10443 10427 } 10444 10428 return link; 10445 10429 } ··· 10452 10436 10453 10437 sec_name = strdup(prog->sec_name); 10454 10438 if (!sec_name) 10455 - return ERR_PTR(-ENOMEM); 10439 + return libbpf_err_ptr(-ENOMEM); 10456 10440 10457 10441 /* extract "tp/<category>/<name>" */ 10458 10442 
tp_cat = sec_name + sec->len; 10459 10443 tp_name = strchr(tp_cat, '/'); 10460 10444 if (!tp_name) { 10461 - link = ERR_PTR(-EINVAL); 10462 - goto out; 10445 + free(sec_name); 10446 + return libbpf_err_ptr(-EINVAL); 10463 10447 } 10464 10448 *tp_name = '\0'; 10465 10449 tp_name++; 10466 10450 10467 10451 link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); 10468 - out: 10469 10452 free(sec_name); 10470 10453 return link; 10471 10454 } ··· 10479 10464 prog_fd = bpf_program__fd(prog); 10480 10465 if (prog_fd < 0) { 10481 10466 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 10482 - return ERR_PTR(-EINVAL); 10467 + return libbpf_err_ptr(-EINVAL); 10483 10468 } 10484 10469 10485 10470 link = calloc(1, sizeof(*link)); 10486 10471 if (!link) 10487 - return ERR_PTR(-ENOMEM); 10472 + return libbpf_err_ptr(-ENOMEM); 10488 10473 link->detach = &bpf_link__detach_fd; 10489 10474 10490 10475 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd); ··· 10493 10478 free(link); 10494 10479 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", 10495 10480 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 10496 - return ERR_PTR(pfd); 10481 + return libbpf_err_ptr(pfd); 10497 10482 } 10498 10483 link->fd = pfd; 10499 10484 return link; ··· 10517 10502 prog_fd = bpf_program__fd(prog); 10518 10503 if (prog_fd < 0) { 10519 10504 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 10520 - return ERR_PTR(-EINVAL); 10505 + return libbpf_err_ptr(-EINVAL); 10521 10506 } 10522 10507 10523 10508 link = calloc(1, sizeof(*link)); 10524 10509 if (!link) 10525 - return ERR_PTR(-ENOMEM); 10510 + return libbpf_err_ptr(-ENOMEM); 10526 10511 link->detach = &bpf_link__detach_fd; 10527 10512 10528 10513 pfd = bpf_raw_tracepoint_open(NULL, prog_fd); ··· 10531 10516 free(link); 10532 10517 pr_warn("prog '%s': failed to attach: %s\n", 10533 10518 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 10534 - return ERR_PTR(pfd); 10519 + 
return libbpf_err_ptr(pfd); 10535 10520 } 10536 10521 link->fd = pfd; 10537 10522 return (struct bpf_link *)link; ··· 10559 10544 return bpf_program__attach_lsm(prog); 10560 10545 } 10561 10546 10562 - static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, 10563 - struct bpf_program *prog) 10564 - { 10565 - return bpf_program__attach_iter(prog, NULL); 10566 - } 10567 - 10568 10547 static struct bpf_link * 10569 10548 bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id, 10570 10549 const char *target_name) ··· 10573 10564 prog_fd = bpf_program__fd(prog); 10574 10565 if (prog_fd < 0) { 10575 10566 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 10576 - return ERR_PTR(-EINVAL); 10567 + return libbpf_err_ptr(-EINVAL); 10577 10568 } 10578 10569 10579 10570 link = calloc(1, sizeof(*link)); 10580 10571 if (!link) 10581 - return ERR_PTR(-ENOMEM); 10572 + return libbpf_err_ptr(-ENOMEM); 10582 10573 link->detach = &bpf_link__detach_fd; 10583 10574 10584 10575 attach_type = bpf_program__get_expected_attach_type(prog); ··· 10589 10580 pr_warn("prog '%s': failed to attach to %s: %s\n", 10590 10581 prog->name, target_name, 10591 10582 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); 10592 - return ERR_PTR(link_fd); 10583 + return libbpf_err_ptr(link_fd); 10593 10584 } 10594 10585 link->fd = link_fd; 10595 10586 return link; ··· 10622 10613 if (!!target_fd != !!attach_func_name) { 10623 10614 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n", 10624 10615 prog->name); 10625 - return ERR_PTR(-EINVAL); 10616 + return libbpf_err_ptr(-EINVAL); 10626 10617 } 10627 10618 10628 10619 if (prog->type != BPF_PROG_TYPE_EXT) { 10629 10620 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace", 10630 10621 prog->name); 10631 - return ERR_PTR(-EINVAL); 10622 + return libbpf_err_ptr(-EINVAL); 10632 10623 } 10633 10624 10634 10625 if (target_fd) { 10635 10626 btf_id = 
libbpf_find_prog_btf_id(attach_func_name, target_fd); 10636 10627 if (btf_id < 0) 10637 - return ERR_PTR(btf_id); 10628 + return libbpf_err_ptr(btf_id); 10638 10629 10639 10630 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace"); 10640 10631 } else { ··· 10656 10647 __u32 target_fd = 0; 10657 10648 10658 10649 if (!OPTS_VALID(opts, bpf_iter_attach_opts)) 10659 - return ERR_PTR(-EINVAL); 10650 + return libbpf_err_ptr(-EINVAL); 10660 10651 10661 10652 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0); 10662 10653 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0); ··· 10664 10655 prog_fd = bpf_program__fd(prog); 10665 10656 if (prog_fd < 0) { 10666 10657 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 10667 - return ERR_PTR(-EINVAL); 10658 + return libbpf_err_ptr(-EINVAL); 10668 10659 } 10669 10660 10670 10661 link = calloc(1, sizeof(*link)); 10671 10662 if (!link) 10672 - return ERR_PTR(-ENOMEM); 10663 + return libbpf_err_ptr(-ENOMEM); 10673 10664 link->detach = &bpf_link__detach_fd; 10674 10665 10675 10666 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, ··· 10679 10670 free(link); 10680 10671 pr_warn("prog '%s': failed to attach to iterator: %s\n", 10681 10672 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); 10682 - return ERR_PTR(link_fd); 10673 + return libbpf_err_ptr(link_fd); 10683 10674 } 10684 10675 link->fd = link_fd; 10685 10676 return link; 10677 + } 10678 + 10679 + static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, 10680 + struct bpf_program *prog) 10681 + { 10682 + return bpf_program__attach_iter(prog, NULL); 10686 10683 } 10687 10684 10688 10685 struct bpf_link *bpf_program__attach(struct bpf_program *prog) ··· 10697 10682 10698 10683 sec_def = find_sec_def(prog->sec_name); 10699 10684 if (!sec_def || !sec_def->attach_fn) 10700 - return ERR_PTR(-ESRCH); 10685 + return libbpf_err_ptr(-ESRCH); 10701 10686 10702 10687 return 
sec_def->attach_fn(sec_def, prog); 10703 10688 } ··· 10720 10705 int err; 10721 10706 10722 10707 if (!bpf_map__is_struct_ops(map) || map->fd == -1) 10723 - return ERR_PTR(-EINVAL); 10708 + return libbpf_err_ptr(-EINVAL); 10724 10709 10725 10710 link = calloc(1, sizeof(*link)); 10726 10711 if (!link) 10727 - return ERR_PTR(-EINVAL); 10712 + return libbpf_err_ptr(-EINVAL); 10728 10713 10729 10714 st_ops = map->st_ops; 10730 10715 for (i = 0; i < btf_vlen(st_ops->type); i++) { ··· 10744 10729 if (err) { 10745 10730 err = -errno; 10746 10731 free(link); 10747 - return ERR_PTR(err); 10732 + return libbpf_err_ptr(err); 10748 10733 } 10749 10734 10750 10735 link->detach = bpf_link__detach_struct_ops; ··· 10798 10783 } 10799 10784 10800 10785 ring_buffer_write_tail(header, data_tail); 10801 - return ret; 10786 + return libbpf_err(ret); 10802 10787 } 10803 10788 10804 10789 struct perf_buffer; ··· 10951 10936 p.lost_cb = opts ? opts->lost_cb : NULL; 10952 10937 p.ctx = opts ? opts->ctx : NULL; 10953 10938 10954 - return __perf_buffer__new(map_fd, page_cnt, &p); 10939 + return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); 10955 10940 } 10956 10941 10957 10942 struct perf_buffer * ··· 10967 10952 p.cpus = opts->cpus; 10968 10953 p.map_keys = opts->map_keys; 10969 10954 10970 - return __perf_buffer__new(map_fd, page_cnt, &p); 10955 + return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); 10971 10956 } 10972 10957 10973 10958 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, ··· 11188 11173 int i, cnt, err; 11189 11174 11190 11175 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); 11176 + if (cnt < 0) 11177 + return libbpf_err_errno(cnt); 11178 + 11191 11179 for (i = 0; i < cnt; i++) { 11192 11180 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; 11193 11181 11194 11182 err = perf_buffer__process_records(pb, cpu_buf); 11195 11183 if (err) { 11196 11184 pr_warn("error while processing records: %d\n", err); 11197 - 
return err; 11185 + return libbpf_err(err); 11198 11186 } 11199 11187 } 11200 - return cnt < 0 ? -errno : cnt; 11188 + return cnt; 11201 11189 } 11202 11190 11203 11191 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer ··· 11221 11203 struct perf_cpu_buf *cpu_buf; 11222 11204 11223 11205 if (buf_idx >= pb->cpu_cnt) 11224 - return -EINVAL; 11206 + return libbpf_err(-EINVAL); 11225 11207 11226 11208 cpu_buf = pb->cpu_bufs[buf_idx]; 11227 11209 if (!cpu_buf) 11228 - return -ENOENT; 11210 + return libbpf_err(-ENOENT); 11229 11211 11230 11212 return cpu_buf->fd; 11231 11213 } ··· 11243 11225 struct perf_cpu_buf *cpu_buf; 11244 11226 11245 11227 if (buf_idx >= pb->cpu_cnt) 11246 - return -EINVAL; 11228 + return libbpf_err(-EINVAL); 11247 11229 11248 11230 cpu_buf = pb->cpu_bufs[buf_idx]; 11249 11231 if (!cpu_buf) 11250 - return -ENOENT; 11232 + return libbpf_err(-ENOENT); 11251 11233 11252 11234 return perf_buffer__process_records(pb, cpu_buf); 11253 11235 } ··· 11265 11247 err = perf_buffer__process_records(pb, cpu_buf); 11266 11248 if (err) { 11267 11249 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err); 11268 - return err; 11250 + return libbpf_err(err); 11269 11251 } 11270 11252 } 11271 11253 return 0; ··· 11377 11359 void *ptr; 11378 11360 11379 11361 if (arrays >> BPF_PROG_INFO_LAST_ARRAY) 11380 - return ERR_PTR(-EINVAL); 11362 + return libbpf_err_ptr(-EINVAL); 11381 11363 11382 11364 /* step 1: get array dimensions */ 11383 11365 err = bpf_obj_get_info_by_fd(fd, &info, &info_len); 11384 11366 if (err) { 11385 11367 pr_debug("can't get prog info: %s", strerror(errno)); 11386 - return ERR_PTR(-EFAULT); 11368 + return libbpf_err_ptr(-EFAULT); 11387 11369 } 11388 11370 11389 11371 /* step 2: calculate total size of all arrays */ ··· 11415 11397 data_len = roundup(data_len, sizeof(__u64)); 11416 11398 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len); 11417 11399 if (!info_linear) 11418 - return 
ERR_PTR(-ENOMEM); 11400 + return libbpf_err_ptr(-ENOMEM); 11419 11401 11420 11402 /* step 4: fill data to info_linear->info */ 11421 11403 info_linear->arrays = arrays; ··· 11447 11429 if (err) { 11448 11430 pr_debug("can't get prog info: %s", strerror(errno)); 11449 11431 free(info_linear); 11450 - return ERR_PTR(-EFAULT); 11432 + return libbpf_err_ptr(-EFAULT); 11451 11433 } 11452 11434 11453 11435 /* step 6: verify the data */ ··· 11526 11508 int btf_obj_fd = 0, btf_id = 0, err; 11527 11509 11528 11510 if (!prog || attach_prog_fd < 0 || !attach_func_name) 11529 - return -EINVAL; 11511 + return libbpf_err(-EINVAL); 11530 11512 11531 11513 if (prog->obj->loaded) 11532 - return -EINVAL; 11514 + return libbpf_err(-EINVAL); 11533 11515 11534 11516 if (attach_prog_fd) { 11535 11517 btf_id = libbpf_find_prog_btf_id(attach_func_name, 11536 11518 attach_prog_fd); 11537 11519 if (btf_id < 0) 11538 - return btf_id; 11520 + return libbpf_err(btf_id); 11539 11521 } else { 11540 11522 /* load btf_vmlinux, if not yet */ 11541 11523 err = bpf_object__load_vmlinux_btf(prog->obj, true); 11542 11524 if (err) 11543 - return err; 11525 + return libbpf_err(err); 11544 11526 err = find_kernel_btf_id(prog->obj, attach_func_name, 11545 11527 prog->expected_attach_type, 11546 11528 &btf_obj_fd, &btf_id); 11547 11529 if (err) 11548 - return err; 11530 + return libbpf_err(err); 11549 11531 } 11550 11532 11551 11533 prog->attach_btf_id = btf_id; ··· 11644 11626 11645 11627 err = parse_cpu_mask_file(fcpu, &mask, &n); 11646 11628 if (err) 11647 - return err; 11629 + return libbpf_err(err); 11648 11630 11649 11631 tmp_cpus = 0; 11650 11632 for (i = 0; i < n; i++) { ··· 11664 11646 .object_name = s->name, 11665 11647 ); 11666 11648 struct bpf_object *obj; 11667 - int i; 11649 + int i, err; 11668 11650 11669 11651 /* Attempt to preserve opts->object_name, unless overriden by user 11670 11652 * explicitly. 
Overwriting object name for skeletons is discouraged, ··· 11679 11661 } 11680 11662 11681 11663 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); 11682 - if (IS_ERR(obj)) { 11683 - pr_warn("failed to initialize skeleton BPF object '%s': %ld\n", 11684 - s->name, PTR_ERR(obj)); 11685 - return PTR_ERR(obj); 11664 + err = libbpf_get_error(obj); 11665 + if (err) { 11666 + pr_warn("failed to initialize skeleton BPF object '%s': %d\n", 11667 + s->name, err); 11668 + return libbpf_err(err); 11686 11669 } 11687 11670 11688 11671 *s->obj = obj; ··· 11696 11677 *map = bpf_object__find_map_by_name(obj, name); 11697 11678 if (!*map) { 11698 11679 pr_warn("failed to find skeleton map '%s'\n", name); 11699 - return -ESRCH; 11680 + return libbpf_err(-ESRCH); 11700 11681 } 11701 11682 11702 11683 /* externs shouldn't be pre-setup from user code */ ··· 11711 11692 *prog = bpf_object__find_program_by_name(obj, name); 11712 11693 if (!*prog) { 11713 11694 pr_warn("failed to find skeleton program '%s'\n", name); 11714 - return -ESRCH; 11695 + return libbpf_err(-ESRCH); 11715 11696 } 11716 11697 } 11717 11698 ··· 11725 11706 err = bpf_object__load(*s->obj); 11726 11707 if (err) { 11727 11708 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); 11728 - return err; 11709 + return libbpf_err(err); 11729 11710 } 11730 11711 11731 11712 for (i = 0; i < s->map_cnt; i++) { ··· 11764 11745 *mmaped = NULL; 11765 11746 pr_warn("failed to re-mmap() map '%s': %d\n", 11766 11747 bpf_map__name(map), err); 11767 - return err; 11748 + return libbpf_err(err); 11768 11749 } 11769 11750 } 11770 11751 ··· 11773 11754 11774 11755 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) 11775 11756 { 11776 - int i; 11757 + int i, err; 11777 11758 11778 11759 for (i = 0; i < s->prog_cnt; i++) { 11779 11760 struct bpf_program *prog = *s->progs[i].prog; ··· 11788 11769 continue; 11789 11770 11790 11771 *link = sec_def->attach_fn(sec_def, prog); 11791 - if (IS_ERR(*link)) { 11792 - 
pr_warn("failed to auto-attach program '%s': %ld\n", 11793 - bpf_program__name(prog), PTR_ERR(*link)); 11794 - return PTR_ERR(*link); 11772 + err = libbpf_get_error(*link); 11773 + if (err) { 11774 + pr_warn("failed to auto-attach program '%s': %d\n", 11775 + bpf_program__name(prog), err); 11776 + return libbpf_err(err); 11795 11777 } 11796 11778 } 11797 11779
+4 -3
tools/lib/bpf/libbpf_errno.c
··· 12 12 #include <string.h> 13 13 14 14 #include "libbpf.h" 15 + #include "libbpf_internal.h" 15 16 16 17 /* make sure libbpf doesn't use kernel-only integer typedefs */ 17 18 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 ··· 40 39 int libbpf_strerror(int err, char *buf, size_t size) 41 40 { 42 41 if (!buf || !size) 43 - return -1; 42 + return libbpf_err(-EINVAL); 44 43 45 44 err = err > 0 ? err : -err; 46 45 ··· 49 48 50 49 ret = strerror_r(err, buf, size); 51 50 buf[size - 1] = '\0'; 52 - return ret; 51 + return libbpf_err_errno(ret); 53 52 } 54 53 55 54 if (err < __LIBBPF_ERRNO__END) { ··· 63 62 64 63 snprintf(buf, size, "Unknown libbpf error %d", err); 65 64 buf[size - 1] = '\0'; 66 - return -1; 65 + return libbpf_err(-ENOENT); 67 66 }
+27
tools/lib/bpf/libbpf_internal.h
··· 462 462 return ret; 463 463 } 464 464 465 + /* handle error for pointer-returning APIs, err is assumed to be < 0 always */ 466 + static inline void *libbpf_err_ptr(int err) 467 + { 468 + /* set errno on error, this doesn't break anything */ 469 + errno = -err; 470 + 471 + if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS) 472 + return NULL; 473 + 474 + /* legacy: encode err as ptr */ 475 + return ERR_PTR(err); 476 + } 477 + 478 + /* handle pointer-returning APIs' error handling */ 479 + static inline void *libbpf_ptr(void *ret) 480 + { 481 + /* set errno on error, this doesn't break anything */ 482 + if (IS_ERR(ret)) 483 + errno = -PTR_ERR(ret); 484 + 485 + if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS) 486 + return IS_ERR(ret) ? NULL : ret; 487 + 488 + /* legacy: pass-through original pointer */ 489 + return ret; 490 + } 491 + 465 492 #endif /* __LIBBPF_LIBBPF_INTERNAL_H */
+11 -11
tools/lib/bpf/linker.c
··· 220 220 int err; 221 221 222 222 if (!OPTS_VALID(opts, bpf_linker_opts)) 223 - return NULL; 223 + return errno = EINVAL, NULL; 224 224 225 225 if (elf_version(EV_CURRENT) == EV_NONE) { 226 226 pr_warn_elf("libelf initialization failed"); 227 - return NULL; 227 + return errno = EINVAL, NULL; 228 228 } 229 229 230 230 linker = calloc(1, sizeof(*linker)); 231 231 if (!linker) 232 - return NULL; 232 + return errno = ENOMEM, NULL; 233 233 234 234 linker->fd = -1; 235 235 ··· 241 241 242 242 err_out: 243 243 bpf_linker__free(linker); 244 - return NULL; 244 + return errno = -err, NULL; 245 245 } 246 246 247 247 static struct dst_sec *add_dst_sec(struct bpf_linker *linker, const char *sec_name) ··· 444 444 int err = 0; 445 445 446 446 if (!OPTS_VALID(opts, bpf_linker_file_opts)) 447 - return -EINVAL; 447 + return libbpf_err(-EINVAL); 448 448 449 449 if (!linker->elf) 450 - return -EINVAL; 450 + return libbpf_err(-EINVAL); 451 451 452 452 err = err ?: linker_load_obj_file(linker, filename, opts, &obj); 453 453 err = err ?: linker_append_sec_data(linker, &obj); ··· 467 467 if (obj.fd >= 0) 468 468 close(obj.fd); 469 469 470 - return err; 470 + return libbpf_err(err); 471 471 } 472 472 473 473 static bool is_dwarf_sec_name(const char *name) ··· 2548 2548 int err, i; 2549 2549 2550 2550 if (!linker->elf) 2551 - return -EINVAL; 2551 + return libbpf_err(-EINVAL); 2552 2552 2553 2553 err = finalize_btf(linker); 2554 2554 if (err) 2555 - return err; 2555 + return libbpf_err(err); 2556 2556 2557 2557 /* Finalize strings */ 2558 2558 strs_sz = strset__data_size(linker->strtab_strs); ··· 2584 2584 if (elf_update(linker->elf, ELF_C_NULL) < 0) { 2585 2585 err = -errno; 2586 2586 pr_warn_elf("failed to finalize ELF layout"); 2587 - return err; 2587 + return libbpf_err(err); 2588 2588 } 2589 2589 2590 2590 /* Write out final ELF contents */ 2591 2591 if (elf_update(linker->elf, ELF_C_WRITE) < 0) { 2592 2592 err = -errno; 2593 2593 pr_warn_elf("failed to write ELF contents"); 2594 - 
return err; 2594 + return libbpf_err(err); 2595 2595 } 2596 2596 2597 2597 elf_end(linker->elf);
+47 -34
tools/lib/bpf/netlink.c
··· 225 225 int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags, 226 226 const struct bpf_xdp_set_link_opts *opts) 227 227 { 228 - int old_fd = -1; 228 + int old_fd = -1, ret; 229 229 230 230 if (!OPTS_VALID(opts, bpf_xdp_set_link_opts)) 231 - return -EINVAL; 231 + return libbpf_err(-EINVAL); 232 232 233 233 if (OPTS_HAS(opts, old_fd)) { 234 234 old_fd = OPTS_GET(opts, old_fd, -1); 235 235 flags |= XDP_FLAGS_REPLACE; 236 236 } 237 237 238 - return __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags); 238 + ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags); 239 + return libbpf_err(ret); 239 240 } 240 241 241 242 int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) 242 243 { 243 - return __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags); 244 + int ret; 245 + 246 + ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags); 247 + return libbpf_err(ret); 244 248 } 245 249 246 250 static int __dump_link_nlmsg(struct nlmsghdr *nlh, ··· 325 321 }; 326 322 327 323 if (flags & ~XDP_FLAGS_MASK || !info_size) 328 - return -EINVAL; 324 + return libbpf_err(-EINVAL); 329 325 330 326 /* Check whether the single {HW,DRV,SKB} mode is set */ 331 327 flags &= (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE); 332 328 mask = flags - 1; 333 329 if (flags && flags & mask) 334 - return -EINVAL; 330 + return libbpf_err(-EINVAL); 335 331 336 332 xdp_id.ifindex = ifindex; 337 333 xdp_id.flags = flags; ··· 345 341 memset((void *) info + sz, 0, info_size - sz); 346 342 } 347 343 348 - return ret; 344 + return libbpf_err(ret); 349 345 } 350 346 351 347 static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags) ··· 373 369 if (!ret) 374 370 *prog_id = get_xdp_id(&info, flags); 375 371 376 - return ret; 372 + return libbpf_err(ret); 377 373 } 378 374 379 375 typedef int (*qdisc_config_t)(struct nlmsghdr *nh, struct tcmsg *t, ··· 467 463 468 464 int bpf_tc_hook_create(struct bpf_tc_hook *hook) 469 465 { 466 + int ret; 467 + 470 468 if (!hook 
|| !OPTS_VALID(hook, bpf_tc_hook) || 471 469 OPTS_GET(hook, ifindex, 0) <= 0) 472 - return -EINVAL; 470 + return libbpf_err(-EINVAL); 473 471 474 - return tc_qdisc_create_excl(hook); 472 + ret = tc_qdisc_create_excl(hook); 473 + return libbpf_err(ret); 475 474 } 476 475 477 476 static int __bpf_tc_detach(const struct bpf_tc_hook *hook, ··· 485 478 { 486 479 if (!hook || !OPTS_VALID(hook, bpf_tc_hook) || 487 480 OPTS_GET(hook, ifindex, 0) <= 0) 488 - return -EINVAL; 481 + return libbpf_err(-EINVAL); 489 482 490 483 switch (OPTS_GET(hook, attach_point, 0)) { 491 484 case BPF_TC_INGRESS: 492 485 case BPF_TC_EGRESS: 493 - return __bpf_tc_detach(hook, NULL, true); 486 + return libbpf_err(__bpf_tc_detach(hook, NULL, true)); 494 487 case BPF_TC_INGRESS | BPF_TC_EGRESS: 495 - return tc_qdisc_delete(hook); 488 + return libbpf_err(tc_qdisc_delete(hook)); 496 489 case BPF_TC_CUSTOM: 497 - return -EOPNOTSUPP; 490 + return libbpf_err(-EOPNOTSUPP); 498 491 default: 499 - return -EINVAL; 492 + return libbpf_err(-EINVAL); 500 493 } 501 494 } 502 495 ··· 581 574 if (!hook || !opts || 582 575 !OPTS_VALID(hook, bpf_tc_hook) || 583 576 !OPTS_VALID(opts, bpf_tc_opts)) 584 - return -EINVAL; 577 + return libbpf_err(-EINVAL); 585 578 586 579 ifindex = OPTS_GET(hook, ifindex, 0); 587 580 parent = OPTS_GET(hook, parent, 0); ··· 594 587 flags = OPTS_GET(opts, flags, 0); 595 588 596 589 if (ifindex <= 0 || !prog_fd || prog_id) 597 - return -EINVAL; 590 + return libbpf_err(-EINVAL); 598 591 if (priority > UINT16_MAX) 599 - return -EINVAL; 592 + return libbpf_err(-EINVAL); 600 593 if (flags & ~BPF_TC_F_REPLACE) 601 - return -EINVAL; 594 + return libbpf_err(-EINVAL); 602 595 603 596 flags = (flags & BPF_TC_F_REPLACE) ? 
NLM_F_REPLACE : NLM_F_EXCL; 604 597 protocol = ETH_P_ALL; ··· 615 608 616 609 ret = tc_get_tcm_parent(attach_point, &parent); 617 610 if (ret < 0) 618 - return ret; 611 + return libbpf_err(ret); 619 612 req.tc.tcm_parent = parent; 620 613 621 614 ret = nlattr_add(&req.nh, sizeof(req), TCA_KIND, "bpf", sizeof("bpf")); 622 615 if (ret < 0) 623 - return ret; 616 + return libbpf_err(ret); 624 617 nla = nlattr_begin_nested(&req.nh, sizeof(req), TCA_OPTIONS); 625 618 if (!nla) 626 - return -EMSGSIZE; 619 + return libbpf_err(-EMSGSIZE); 627 620 ret = tc_add_fd_and_name(&req.nh, sizeof(req), prog_fd); 628 621 if (ret < 0) 629 - return ret; 622 + return libbpf_err(ret); 630 623 bpf_flags = TCA_BPF_FLAG_ACT_DIRECT; 631 624 ret = nlattr_add(&req.nh, sizeof(req), TCA_BPF_FLAGS, &bpf_flags, 632 625 sizeof(bpf_flags)); 633 626 if (ret < 0) 634 - return ret; 627 + return libbpf_err(ret); 635 628 nlattr_end_nested(&req.nh, nla); 636 629 637 630 info.opts = opts; 638 631 639 632 ret = libbpf_netlink_send_recv(&req.nh, get_tc_info, NULL, &info); 640 633 if (ret < 0) 641 - return ret; 634 + return libbpf_err(ret); 642 635 if (!info.processed) 643 - return -ENOENT; 636 + return libbpf_err(-ENOENT); 644 637 return ret; 645 638 } 646 639 ··· 715 708 int bpf_tc_detach(const struct bpf_tc_hook *hook, 716 709 const struct bpf_tc_opts *opts) 717 710 { 718 - return !opts ? 
-EINVAL : __bpf_tc_detach(hook, opts, false); 711 + int ret; 712 + 713 + if (!opts) 714 + return libbpf_err(-EINVAL); 715 + 716 + ret = __bpf_tc_detach(hook, opts, false); 717 + return libbpf_err(ret); 719 718 } 720 719 721 720 int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts) ··· 738 725 if (!hook || !opts || 739 726 !OPTS_VALID(hook, bpf_tc_hook) || 740 727 !OPTS_VALID(opts, bpf_tc_opts)) 741 - return -EINVAL; 728 + return libbpf_err(-EINVAL); 742 729 743 730 ifindex = OPTS_GET(hook, ifindex, 0); 744 731 parent = OPTS_GET(hook, parent, 0); ··· 752 739 753 740 if (ifindex <= 0 || flags || prog_fd || prog_id || 754 741 !handle || !priority) 755 - return -EINVAL; 742 + return libbpf_err(-EINVAL); 756 743 if (priority > UINT16_MAX) 757 - return -EINVAL; 744 + return libbpf_err(-EINVAL); 758 745 759 746 protocol = ETH_P_ALL; 760 747 ··· 769 756 770 757 ret = tc_get_tcm_parent(attach_point, &parent); 771 758 if (ret < 0) 772 - return ret; 759 + return libbpf_err(ret); 773 760 req.tc.tcm_parent = parent; 774 761 775 762 ret = nlattr_add(&req.nh, sizeof(req), TCA_KIND, "bpf", sizeof("bpf")); 776 763 if (ret < 0) 777 - return ret; 764 + return libbpf_err(ret); 778 765 779 766 info.opts = opts; 780 767 781 768 ret = libbpf_netlink_send_recv(&req.nh, get_tc_info, NULL, &info); 782 769 if (ret < 0) 783 - return ret; 770 + return libbpf_err(ret); 784 771 if (!info.processed) 785 - return -ENOENT; 772 + return libbpf_err(-ENOENT); 786 773 return ret; 787 774 }
+13 -13
tools/lib/bpf/ringbuf.c
··· 69 69 err = -errno; 70 70 pr_warn("ringbuf: failed to get map info for fd=%d: %d\n", 71 71 map_fd, err); 72 - return err; 72 + return libbpf_err(err); 73 73 } 74 74 75 75 if (info.type != BPF_MAP_TYPE_RINGBUF) { 76 76 pr_warn("ringbuf: map fd=%d is not BPF_MAP_TYPE_RINGBUF\n", 77 77 map_fd); 78 - return -EINVAL; 78 + return libbpf_err(-EINVAL); 79 79 } 80 80 81 81 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings)); 82 82 if (!tmp) 83 - return -ENOMEM; 83 + return libbpf_err(-ENOMEM); 84 84 rb->rings = tmp; 85 85 86 86 tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events)); 87 87 if (!tmp) 88 - return -ENOMEM; 88 + return libbpf_err(-ENOMEM); 89 89 rb->events = tmp; 90 90 91 91 r = &rb->rings[rb->ring_cnt]; ··· 103 103 err = -errno; 104 104 pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n", 105 105 map_fd, err); 106 - return err; 106 + return libbpf_err(err); 107 107 } 108 108 r->consumer_pos = tmp; 109 109 ··· 118 118 ringbuf_unmap_ring(rb, r); 119 119 pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n", 120 120 map_fd, err); 121 - return err; 121 + return libbpf_err(err); 122 122 } 123 123 r->producer_pos = tmp; 124 124 r->data = tmp + rb->page_size; ··· 133 133 ringbuf_unmap_ring(rb, r); 134 134 pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n", 135 135 map_fd, err); 136 - return err; 136 + return libbpf_err(err); 137 137 } 138 138 139 139 rb->ring_cnt++; ··· 165 165 int err; 166 166 167 167 if (!OPTS_VALID(opts, ring_buffer_opts)) 168 - return NULL; 168 + return errno = EINVAL, NULL; 169 169 170 170 rb = calloc(1, sizeof(*rb)); 171 171 if (!rb) 172 - return NULL; 172 + return errno = ENOMEM, NULL; 173 173 174 174 rb->page_size = getpagesize(); 175 175 ··· 188 188 189 189 err_out: 190 190 ring_buffer__free(rb); 191 - return NULL; 191 + return errno = -err, NULL; 192 192 } 193 193 194 194 static inline int roundup_len(__u32 len) ··· 260 260 261 261 err = 
ringbuf_process_ring(ring); 262 262 if (err < 0) 263 - return err; 263 + return libbpf_err(err); 264 264 res += err; 265 265 } 266 266 if (res > INT_MAX) ··· 279 279 280 280 cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms); 281 281 if (cnt < 0) 282 - return -errno; 282 + return libbpf_err(-errno); 283 283 284 284 for (i = 0; i < cnt; i++) { 285 285 __u32 ring_id = rb->events[i].data.fd; ··· 287 287 288 288 err = ringbuf_process_ring(ring); 289 289 if (err < 0) 290 - return err; 290 + return libbpf_err(err); 291 291 res += err; 292 292 } 293 293 if (res > INT_MAX)