Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-07-13

The following pull-request contains BPF updates for your *net-next* tree.

We've added 36 non-merge commits during the last 7 day(s) which contain
a total of 62 files changed, 2242 insertions(+), 468 deletions(-).

The main changes are:

1) Avoid trace_printk warning banner by switching bpf_trace_printk to use
its own tracing event, from Alan.

2) Better libbpf support on older kernels, from Andrii.

3) Additional AF_XDP stats, from Ciara.

4) Build time resolution of BTF IDs, from Jiri.

5) BPF_CGROUP_INET_SOCK_RELEASE hook, from Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+2235 -461
+36
Documentation/bpf/btf.rst
··· 691 691 bpf_insn``. For ELF API, the ``insn_off`` is the byte offset from the 692 692 beginning of section (``btf_ext_info_sec->sec_name_off``). 693 693 694 + 4.2 .BTF_ids section 695 + ==================== 696 + 697 + The .BTF_ids section encodes BTF ID values that are used within the kernel. 698 + 699 + This section is created during the kernel compilation with the help of 700 + macros defined in ``include/linux/btf_ids.h`` header file. Kernel code can 701 + use them to create lists and sets (sorted lists) of BTF ID values. 702 + 703 + The ``BTF_ID_LIST`` and ``BTF_ID`` macros define unsorted list of BTF ID values, 704 + with following syntax:: 705 + 706 + BTF_ID_LIST(list) 707 + BTF_ID(type1, name1) 708 + BTF_ID(type2, name2) 709 + 710 + resulting in following layout in .BTF_ids section:: 711 + 712 + __BTF_ID__type1__name1__1: 713 + .zero 4 714 + __BTF_ID__type2__name2__2: 715 + .zero 4 716 + 717 + The ``u32 list[];`` variable is defined to access the list. 718 + 719 + The ``BTF_ID_UNUSED`` macro defines 4 zero bytes. It's used when we 720 + want to define unused entry in BTF_ID_LIST, like:: 721 + 722 + BTF_ID_LIST(bpf_skb_output_btf_ids) 723 + BTF_ID(struct, sk_buff) 724 + BTF_ID_UNUSED 725 + BTF_ID(struct, task_struct) 726 + 727 + All the BTF ID lists and sets are compiled in the .BTF_ids section and 728 + resolved during the linking phase of kernel build by ``resolve_btfids`` tool. 729 + 694 730 5. Using BTF 695 731 ************ 696 732
+20 -5
Makefile
··· 448 448 STRIP = $(CROSS_COMPILE)strip 449 449 endif 450 450 PAHOLE = pahole 451 + RESOLVE_BTFIDS = $(objtree)/tools/bpf/resolve_btfids/resolve_btfids 451 452 LEX = flex 452 453 YACC = bison 453 454 AWK = awk ··· 511 510 CLANG_FLAGS := 512 511 513 512 export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC 514 - export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL 513 + export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL 515 514 export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX 516 515 export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ 517 516 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE ··· 1054 1053 1055 1054 HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) 1056 1055 1056 + has_libelf = $(call try-run,\ 1057 + echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) 1058 + 1057 1059 ifdef CONFIG_STACK_VALIDATION 1058 - has_libelf := $(call try-run,\ 1059 - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) 1060 1060 ifeq ($(has_libelf),1) 1061 1061 objtool_target := tools/objtool FORCE 1062 1062 else 1063 1063 SKIP_STACK_VALIDATION := 1 1064 1064 export SKIP_STACK_VALIDATION 1065 + endif 1066 + endif 1067 + 1068 + ifdef CONFIG_DEBUG_INFO_BTF 1069 + ifeq ($(has_libelf),1) 1070 + resolve_btfids_target := tools/bpf/resolve_btfids FORCE 1071 + else 1072 + ERROR_RESOLVE_BTFIDS := 1 1065 1073 endif 1066 1074 endif 1067 1075 ··· 1185 1175 $(Q)$(MAKE) $(build)=. 1186 1176 1187 1177 # All the preparing.. 
1188 - prepare: prepare0 prepare-objtool 1178 + prepare: prepare0 prepare-objtool prepare-resolve_btfids 1189 1179 1190 1180 # Support for using generic headers in asm-generic 1191 1181 asm-generic := -f $(srctree)/scripts/Makefile.asm-generic obj ··· 1198 1188 $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \ 1199 1189 generic=include/uapi/asm-generic 1200 1190 1201 - PHONY += prepare-objtool 1191 + PHONY += prepare-objtool prepare-resolve_btfids 1202 1192 prepare-objtool: $(objtool_target) 1203 1193 ifeq ($(SKIP_STACK_VALIDATION),1) 1204 1194 ifdef CONFIG_UNWINDER_ORC ··· 1209 1199 endif 1210 1200 endif 1211 1201 1202 + prepare-resolve_btfids: $(resolve_btfids_target) 1203 + ifeq ($(ERROR_RESOLVE_BTFIDS),1) 1204 + @echo "error: Cannot resolve BTF IDs for CONFIG_DEBUG_INFO_BTF, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 1205 + @false 1206 + endif 1212 1207 # Generate some files 1213 1208 # --------------------------------------------------------------------------- 1214 1209
+4
include/asm-generic/vmlinux.lds.h
··· 641 641 __start_BTF = .; \ 642 642 *(.BTF) \ 643 643 __stop_BTF = .; \ 644 + } \ 645 + . = ALIGN(4); \ 646 + .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ 647 + *(.BTF_ids) \ 644 648 } 645 649 #else 646 650 #define BTF
+4
include/linux/bpf-cgroup.h
··· 210 210 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ 211 211 BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) 212 212 213 + #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \ 214 + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE) 215 + 213 216 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ 214 217 BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) 215 218 ··· 404 401 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) 405 402 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) 406 403 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) 404 + #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) 407 405 #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) 408 406 #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) 409 407 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
+87
include/linux/btf_ids.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _LINUX_BTF_IDS_H 4 + #define _LINUX_BTF_IDS_H 5 + 6 + #include <linux/compiler.h> /* for __PASTE */ 7 + 8 + /* 9 + * Following macros help to define lists of BTF IDs placed 10 + * in .BTF_ids section. They are initially filled with zeros 11 + * (during compilation) and resolved later during the 12 + * linking phase by resolve_btfids tool. 13 + * 14 + * Any change in list layout must be reflected in resolve_btfids 15 + * tool logic. 16 + */ 17 + 18 + #define BTF_IDS_SECTION ".BTF_ids" 19 + 20 + #define ____BTF_ID(symbol) \ 21 + asm( \ 22 + ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ 23 + ".local " #symbol " ; \n" \ 24 + ".type " #symbol ", @object; \n" \ 25 + ".size " #symbol ", 4; \n" \ 26 + #symbol ": \n" \ 27 + ".zero 4 \n" \ 28 + ".popsection; \n"); 29 + 30 + #define __BTF_ID(symbol) \ 31 + ____BTF_ID(symbol) 32 + 33 + #define __ID(prefix) \ 34 + __PASTE(prefix, __COUNTER__) 35 + 36 + /* 37 + * The BTF_ID defines unique symbol for each ID pointing 38 + * to 4 zero bytes. 39 + */ 40 + #define BTF_ID(prefix, name) \ 41 + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) 42 + 43 + /* 44 + * The BTF_ID_LIST macro defines pure (unsorted) list 45 + * of BTF IDs, with following layout: 46 + * 47 + * BTF_ID_LIST(list1) 48 + * BTF_ID(type1, name1) 49 + * BTF_ID(type2, name2) 50 + * 51 + * list1: 52 + * __BTF_ID__type1__name1__1: 53 + * .zero 4 54 + * __BTF_ID__type2__name2__2: 55 + * .zero 4 56 + * 57 + */ 58 + #define __BTF_ID_LIST(name) \ 59 + asm( \ 60 + ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ 61 + ".local " #name "; \n" \ 62 + #name ":; \n" \ 63 + ".popsection; \n"); \ 64 + 65 + #define BTF_ID_LIST(name) \ 66 + __BTF_ID_LIST(name) \ 67 + extern u32 name[]; 68 + 69 + /* 70 + * The BTF_ID_UNUSED macro defines 4 zero bytes. 
71 + * It's used when we want to define 'unused' entry 72 + * in BTF_ID_LIST, like: 73 + * 74 + * BTF_ID_LIST(bpf_skb_output_btf_ids) 75 + * BTF_ID(struct, sk_buff) 76 + * BTF_ID_UNUSED 77 + * BTF_ID(struct, task_struct) 78 + */ 79 + 80 + #define BTF_ID_UNUSED \ 81 + asm( \ 82 + ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ 83 + ".zero 4 \n" \ 84 + ".popsection; \n"); 85 + 86 + 87 + #endif
+4
include/net/xdp_sock.h
··· 69 69 spinlock_t tx_completion_lock; 70 70 /* Protects generic receive. */ 71 71 spinlock_t rx_lock; 72 + 73 + /* Statistics */ 72 74 u64 rx_dropped; 75 + u64 rx_queue_full; 76 + 73 77 struct list_head map_list; 74 78 /* Protects map_list */ 75 79 spinlock_t map_list_lock;
+1
include/uapi/linux/bpf.h
··· 226 226 BPF_CGROUP_INET4_GETSOCKNAME, 227 227 BPF_CGROUP_INET6_GETSOCKNAME, 228 228 BPF_XDP_DEVMAP, 229 + BPF_CGROUP_INET_SOCK_RELEASE, 229 230 __MAX_BPF_ATTACH_TYPE 230 231 }; 231 232
+4 -1
include/uapi/linux/if_xdp.h
··· 73 73 }; 74 74 75 75 struct xdp_statistics { 76 - __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ 76 + __u64 rx_dropped; /* Dropped for other reasons */ 77 77 __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ 78 78 __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ 79 + __u64 rx_ring_full; /* Dropped due to rx ring being full */ 80 + __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ 81 + __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ 79 82 }; 80 83 81 84 struct xdp_options {
+11
include/uapi/linux/xdp_diag.h
··· 30 30 #define XDP_SHOW_RING_CFG (1 << 1) 31 31 #define XDP_SHOW_UMEM (1 << 2) 32 32 #define XDP_SHOW_MEMINFO (1 << 3) 33 + #define XDP_SHOW_STATS (1 << 4) 33 34 34 35 enum { 35 36 XDP_DIAG_NONE, ··· 42 41 XDP_DIAG_UMEM_FILL_RING, 43 42 XDP_DIAG_UMEM_COMPLETION_RING, 44 43 XDP_DIAG_MEMINFO, 44 + XDP_DIAG_STATS, 45 45 __XDP_DIAG_MAX, 46 46 }; 47 47 ··· 69 67 __u32 queue_id; 70 68 __u32 flags; 71 69 __u32 refs; 70 + }; 71 + 72 + struct xdp_diag_stats { 73 + __u64 n_rx_dropped; 74 + __u64 n_rx_invalid; 75 + __u64 n_rx_full; 76 + __u64 n_fill_ring_empty; 77 + __u64 n_tx_invalid; 78 + __u64 n_tx_ring_empty; 72 79 }; 73 80 74 81 #endif /* _LINUX_XDP_DIAG_H */
+11 -92
kernel/bpf/btf.c
··· 18 18 #include <linux/sort.h> 19 19 #include <linux/bpf_verifier.h> 20 20 #include <linux/btf.h> 21 + #include <linux/btf_ids.h> 21 22 #include <linux/skmsg.h> 22 23 #include <linux/perf_event.h> 23 24 #include <net/sock.h> ··· 3622 3621 return kern_ctx_type->type; 3623 3622 } 3624 3623 3624 + BTF_ID_LIST(bpf_ctx_convert_btf_id) 3625 + BTF_ID(struct, bpf_ctx_convert) 3626 + 3625 3627 struct btf *btf_parse_vmlinux(void) 3626 3628 { 3627 3629 struct btf_verifier_env *env = NULL; 3628 3630 struct bpf_verifier_log *log; 3629 3631 struct btf *btf = NULL; 3630 - int err, btf_id; 3632 + int err; 3631 3633 3632 3634 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 3633 3635 if (!env) ··· 3663 3659 if (err) 3664 3660 goto errout; 3665 3661 3666 - /* find struct bpf_ctx_convert for type checking later */ 3667 - btf_id = btf_find_by_name_kind(btf, "bpf_ctx_convert", BTF_KIND_STRUCT); 3668 - if (btf_id < 0) { 3669 - err = btf_id; 3670 - goto errout; 3671 - } 3672 3662 /* btf_parse_vmlinux() runs under bpf_verifier_lock */ 3673 - bpf_ctx_convert.t = btf_type_by_id(btf, btf_id); 3663 + bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); 3674 3664 3675 3665 /* find bpf map structs for map_ptr access checking */ 3676 3666 err = btf_vmlinux_map_ids_init(btf, log); ··· 4077 4079 return -EINVAL; 4078 4080 } 4079 4081 4080 - static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn, 4081 - int arg) 4082 - { 4083 - char fnname[KSYM_SYMBOL_LEN + 4] = "btf_"; 4084 - const struct btf_param *args; 4085 - const struct btf_type *t; 4086 - const char *tname, *sym; 4087 - u32 btf_id, i; 4088 - 4089 - if (IS_ERR(btf_vmlinux)) { 4090 - bpf_log(log, "btf_vmlinux is malformed\n"); 4091 - return -EINVAL; 4092 - } 4093 - 4094 - sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4); 4095 - if (!sym) { 4096 - bpf_log(log, "kernel doesn't have kallsyms\n"); 4097 - return -EFAULT; 4098 - } 4099 - 4100 - for (i = 1; i <= btf_vmlinux->nr_types; i++) { 
4101 - t = btf_type_by_id(btf_vmlinux, i); 4102 - if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) 4103 - continue; 4104 - tname = __btf_name_by_offset(btf_vmlinux, t->name_off); 4105 - if (!strcmp(tname, fnname)) 4106 - break; 4107 - } 4108 - if (i > btf_vmlinux->nr_types) { 4109 - bpf_log(log, "helper %s type is not found\n", fnname); 4110 - return -ENOENT; 4111 - } 4112 - 4113 - t = btf_type_by_id(btf_vmlinux, t->type); 4114 - if (!btf_type_is_ptr(t)) 4115 - return -EFAULT; 4116 - t = btf_type_by_id(btf_vmlinux, t->type); 4117 - if (!btf_type_is_func_proto(t)) 4118 - return -EFAULT; 4119 - 4120 - args = (const struct btf_param *)(t + 1); 4121 - if (arg >= btf_type_vlen(t)) { 4122 - bpf_log(log, "bpf helper %s doesn't have %d-th argument\n", 4123 - fnname, arg); 4124 - return -EINVAL; 4125 - } 4126 - 4127 - t = btf_type_by_id(btf_vmlinux, args[arg].type); 4128 - if (!btf_type_is_ptr(t) || !t->type) { 4129 - /* anything but the pointer to struct is a helper config bug */ 4130 - bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n"); 4131 - return -EFAULT; 4132 - } 4133 - btf_id = t->type; 4134 - t = btf_type_by_id(btf_vmlinux, t->type); 4135 - /* skip modifiers */ 4136 - while (btf_type_is_modifier(t)) { 4137 - btf_id = t->type; 4138 - t = btf_type_by_id(btf_vmlinux, t->type); 4139 - } 4140 - if (!btf_type_is_struct(t)) { 4141 - bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n"); 4142 - return -EFAULT; 4143 - } 4144 - bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4, 4145 - arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off)); 4146 - return btf_id; 4147 - } 4148 - 4149 4082 int btf_resolve_helper_id(struct bpf_verifier_log *log, 4150 4083 const struct bpf_func_proto *fn, int arg) 4151 4084 { 4152 - int *btf_id = &fn->btf_id[arg]; 4153 - int ret; 4085 + int id; 4154 4086 4155 4087 if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID) 4156 4088 return -EINVAL; 4157 - 4158 - ret = READ_ONCE(*btf_id); 4159 - if (ret) 4160 - return ret; 4161 - /* ok to race 
the search. The result is the same */ 4162 - ret = __btf_resolve_helper_id(log, fn->func, arg); 4163 - if (!ret) { 4164 - /* Function argument cannot be type 'void' */ 4165 - bpf_log(log, "BTF resolution bug\n"); 4166 - return -EFAULT; 4167 - } 4168 - WRITE_ONCE(*btf_id, ret); 4169 - return ret; 4089 + id = fn->btf_id[arg]; 4090 + if (!id || id > btf_vmlinux->nr_types) 4091 + return -EINVAL; 4092 + return id; 4170 4093 } 4171 4094 4172 4095 static int __get_type_size(struct btf *btf, u32 btf_id,
+4 -1
kernel/bpf/stackmap.c
··· 9 9 #include <linux/elf.h> 10 10 #include <linux/pagemap.h> 11 11 #include <linux/irq_work.h> 12 + #include <linux/btf_ids.h> 12 13 #include "percpu_freelist.h" 13 14 14 15 #define STACK_CREATE_FLAG_MASK \ ··· 577 576 return __bpf_get_stack(regs, task, buf, size, flags); 578 577 } 579 578 580 - static int bpf_get_task_stack_btf_ids[5]; 579 + BTF_ID_LIST(bpf_get_task_stack_btf_ids) 580 + BTF_ID(struct, task_struct) 581 + 581 582 const struct bpf_func_proto bpf_get_task_stack_proto = { 582 583 .func = bpf_get_task_stack, 583 584 .gpl_only = false,
+3
kernel/bpf/syscall.c
··· 1981 1981 case BPF_PROG_TYPE_CGROUP_SOCK: 1982 1982 switch (expected_attach_type) { 1983 1983 case BPF_CGROUP_INET_SOCK_CREATE: 1984 + case BPF_CGROUP_INET_SOCK_RELEASE: 1984 1985 case BPF_CGROUP_INET4_POST_BIND: 1985 1986 case BPF_CGROUP_INET6_POST_BIND: 1986 1987 return 0; ··· 2780 2779 return BPF_PROG_TYPE_CGROUP_SKB; 2781 2780 break; 2782 2781 case BPF_CGROUP_INET_SOCK_CREATE: 2782 + case BPF_CGROUP_INET_SOCK_RELEASE: 2783 2783 case BPF_CGROUP_INET4_POST_BIND: 2784 2784 case BPF_CGROUP_INET6_POST_BIND: 2785 2785 return BPF_PROG_TYPE_CGROUP_SOCK; ··· 2929 2927 case BPF_CGROUP_INET_INGRESS: 2930 2928 case BPF_CGROUP_INET_EGRESS: 2931 2929 case BPF_CGROUP_INET_SOCK_CREATE: 2930 + case BPF_CGROUP_INET_SOCK_RELEASE: 2932 2931 case BPF_CGROUP_INET4_BIND: 2933 2932 case BPF_CGROUP_INET6_BIND: 2934 2933 case BPF_CGROUP_INET4_POST_BIND:
+2
kernel/trace/Makefile
··· 31 31 GCOV_PROFILE := y 32 32 endif 33 33 34 + CFLAGS_bpf_trace.o := -I$(src) 35 + 34 36 CFLAGS_trace_benchmark.o := -I$(src) 35 37 CFLAGS_trace_events_filter.o := -I$(src) 36 38
+44 -7
kernel/trace/bpf_trace.c
··· 11 11 #include <linux/uaccess.h> 12 12 #include <linux/ctype.h> 13 13 #include <linux/kprobes.h> 14 + #include <linux/spinlock.h> 14 15 #include <linux/syscalls.h> 15 16 #include <linux/error-injection.h> 17 + #include <linux/btf_ids.h> 16 18 17 19 #include <asm/tlb.h> 18 20 19 21 #include "trace_probe.h" 20 22 #include "trace.h" 23 + 24 + #define CREATE_TRACE_POINTS 25 + #include "bpf_trace.h" 21 26 22 27 #define bpf_event_rcu_dereference(p) \ 23 28 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) ··· 379 374 } 380 375 } 381 376 377 + static DEFINE_RAW_SPINLOCK(trace_printk_lock); 378 + 379 + #define BPF_TRACE_PRINTK_SIZE 1024 380 + 381 + static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...) 382 + { 383 + static char buf[BPF_TRACE_PRINTK_SIZE]; 384 + unsigned long flags; 385 + va_list ap; 386 + int ret; 387 + 388 + raw_spin_lock_irqsave(&trace_printk_lock, flags); 389 + va_start(ap, fmt); 390 + ret = vsnprintf(buf, sizeof(buf), fmt, ap); 391 + va_end(ap); 392 + /* vsnprintf() will not append null for zero-length strings */ 393 + if (ret == 0) 394 + buf[0] = '\0'; 395 + trace_bpf_trace_printk(buf); 396 + raw_spin_unlock_irqrestore(&trace_printk_lock, flags); 397 + 398 + return ret; 399 + } 400 + 382 401 /* 383 402 * Only limited trace_printk() conversion specifiers allowed: 384 403 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s ··· 512 483 */ 513 484 #define __BPF_TP_EMIT() __BPF_ARG3_TP() 514 485 #define __BPF_TP(...) \ 515 - __trace_printk(0 /* Fake ip */, \ 516 - fmt, ##__VA_ARGS__) 486 + bpf_do_trace_printk(fmt, ##__VA_ARGS__) 517 487 518 488 #define __BPF_ARG1_TP(...) 
\ 519 489 ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ ··· 549 521 const struct bpf_func_proto *bpf_get_trace_printk_proto(void) 550 522 { 551 523 /* 552 - * this program might be calling bpf_trace_printk, 553 - * so allocate per-cpu printk buffers 524 + * This program might be calling bpf_trace_printk, 525 + * so enable the associated bpf_trace/bpf_trace_printk event. 526 + * Repeat this each time as it is possible a user has 527 + * disabled bpf_trace_printk events. By loading a program 528 + * calling bpf_trace_printk() however the user has expressed 529 + * the intent to see such events. 554 530 */ 555 - trace_printk_init_buffers(); 531 + if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) 532 + pr_warn_ratelimited("could not enable bpf_trace_printk events"); 556 533 557 534 return &bpf_trace_printk_proto; 558 535 } ··· 743 710 return err; 744 711 } 745 712 746 - static int bpf_seq_printf_btf_ids[5]; 713 + BTF_ID_LIST(bpf_seq_printf_btf_ids) 714 + BTF_ID(struct, seq_file) 715 + 747 716 static const struct bpf_func_proto bpf_seq_printf_proto = { 748 717 .func = bpf_seq_printf, 749 718 .gpl_only = true, ··· 763 728 return seq_write(m, data, len) ? -EOVERFLOW : 0; 764 729 } 765 730 766 - static int bpf_seq_write_btf_ids[5]; 731 + BTF_ID_LIST(bpf_seq_write_btf_ids) 732 + BTF_ID(struct, seq_file) 733 + 767 734 static const struct bpf_func_proto bpf_seq_write_proto = { 768 735 .func = bpf_seq_write, 769 736 .gpl_only = true,
+34
kernel/trace/bpf_trace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #undef TRACE_SYSTEM 3 + #define TRACE_SYSTEM bpf_trace 4 + 5 + #if !defined(_TRACE_BPF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 6 + 7 + #define _TRACE_BPF_TRACE_H 8 + 9 + #include <linux/tracepoint.h> 10 + 11 + TRACE_EVENT(bpf_trace_printk, 12 + 13 + TP_PROTO(const char *bpf_string), 14 + 15 + TP_ARGS(bpf_string), 16 + 17 + TP_STRUCT__entry( 18 + __string(bpf_string, bpf_string) 19 + ), 20 + 21 + TP_fast_assign( 22 + __assign_str(bpf_string, bpf_string); 23 + ), 24 + 25 + TP_printk("%s", __get_str(bpf_string)) 26 + ); 27 + 28 + #endif /* _TRACE_BPF_TRACE_H */ 29 + 30 + #undef TRACE_INCLUDE_PATH 31 + #define TRACE_INCLUDE_PATH . 32 + #define TRACE_INCLUDE_FILE bpf_trace 33 + 34 + #include <trace/define_trace.h>
+8 -2
net/core/filter.c
··· 75 75 #include <net/ipv6_stubs.h> 76 76 #include <net/bpf_sk_storage.h> 77 77 #include <net/transp_v6.h> 78 + #include <linux/btf_ids.h> 78 79 79 80 /** 80 81 * sk_filter_trim_cap - run a packet through a socket filter ··· 3780 3779 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 3781 3780 }; 3782 3781 3783 - static int bpf_skb_output_btf_ids[5]; 3782 + BTF_ID_LIST(bpf_skb_output_btf_ids) 3783 + BTF_ID(struct, sk_buff) 3784 + 3784 3785 const struct bpf_func_proto bpf_skb_output_proto = { 3785 3786 .func = bpf_skb_event_output, 3786 3787 .gpl_only = true, ··· 4176 4173 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4177 4174 }; 4178 4175 4179 - static int bpf_xdp_output_btf_ids[5]; 4176 + BTF_ID_LIST(bpf_xdp_output_btf_ids) 4177 + BTF_ID(struct, xdp_buff) 4178 + 4180 4179 const struct bpf_func_proto bpf_xdp_output_proto = { 4181 4180 .func = bpf_xdp_event_output, 4182 4181 .gpl_only = true, ··· 6899 6894 case offsetof(struct bpf_sock, priority): 6900 6895 switch (attach_type) { 6901 6896 case BPF_CGROUP_INET_SOCK_CREATE: 6897 + case BPF_CGROUP_INET_SOCK_RELEASE: 6902 6898 goto full_access; 6903 6899 default: 6904 6900 return false;
+3
net/ipv4/af_inet.c
··· 411 411 if (sk) { 412 412 long timeout; 413 413 414 + if (!sk->sk_kern_sock) 415 + BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk); 416 + 414 417 /* Applications forget to leave groups before exiting */ 415 418 ip_mc_drop_socket(sk); 416 419
+31 -5
net/xdp/xsk.c
··· 123 123 addr = xp_get_handle(xskb); 124 124 err = xskq_prod_reserve_desc(xs->rx, addr, len); 125 125 if (err) { 126 - xs->rx_dropped++; 126 + xs->rx_queue_full++; 127 127 return err; 128 128 } 129 129 ··· 274 274 275 275 rcu_read_lock(); 276 276 list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) { 277 - if (!xskq_cons_peek_desc(xs->tx, desc, umem)) 277 + if (!xskq_cons_peek_desc(xs->tx, desc, umem)) { 278 + xs->tx->queue_empty_descs++; 278 279 continue; 280 + } 279 281 280 282 /* This is the backpressure mechanism for the Tx path. 281 283 * Reserve space in the completion queue and only proceed ··· 388 386 389 387 sent_frame = true; 390 388 } 389 + 390 + xs->tx->queue_empty_descs++; 391 391 392 392 out: 393 393 if (sent_frame) ··· 816 812 ring->desc = offsetof(struct xdp_umem_ring, desc); 817 813 } 818 814 815 + struct xdp_statistics_v1 { 816 + __u64 rx_dropped; 817 + __u64 rx_invalid_descs; 818 + __u64 tx_invalid_descs; 819 + }; 820 + 819 821 static int xsk_getsockopt(struct socket *sock, int level, int optname, 820 822 char __user *optval, int __user *optlen) 821 823 { ··· 841 831 case XDP_STATISTICS: 842 832 { 843 833 struct xdp_statistics stats; 834 + bool extra_stats = true; 835 + size_t stats_size; 844 836 845 - if (len < sizeof(stats)) 837 + if (len < sizeof(struct xdp_statistics_v1)) { 846 838 return -EINVAL; 839 + } else if (len < sizeof(stats)) { 840 + extra_stats = false; 841 + stats_size = sizeof(struct xdp_statistics_v1); 842 + } else { 843 + stats_size = sizeof(stats); 844 + } 847 845 848 846 mutex_lock(&xs->mutex); 849 847 stats.rx_dropped = xs->rx_dropped; 848 + if (extra_stats) { 849 + stats.rx_ring_full = xs->rx_queue_full; 850 + stats.rx_fill_ring_empty_descs = 851 + xs->umem ? 
xskq_nb_queue_empty_descs(xs->umem->fq) : 0; 852 + stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx); 853 + } else { 854 + stats.rx_dropped += xs->rx_queue_full; 855 + } 850 856 stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx); 851 857 stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx); 852 858 mutex_unlock(&xs->mutex); 853 859 854 - if (copy_to_user(optval, &stats, sizeof(stats))) 860 + if (copy_to_user(optval, &stats, stats_size)) 855 861 return -EFAULT; 856 - if (put_user(sizeof(stats), optlen)) 862 + if (put_user(stats_size, optlen)) 857 863 return -EFAULT; 858 864 859 865 return 0;
+1
net/xdp/xsk_buff_pool.c
··· 189 189 190 190 for (;;) { 191 191 if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) { 192 + pool->fq->queue_empty_descs++; 192 193 xp_release(xskb); 193 194 return NULL; 194 195 }
+17
net/xdp/xsk_diag.c
··· 76 76 return err; 77 77 } 78 78 79 + static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb) 80 + { 81 + struct xdp_diag_stats du = {}; 82 + 83 + du.n_rx_dropped = xs->rx_dropped; 84 + du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx); 85 + du.n_rx_full = xs->rx_queue_full; 86 + du.n_fill_ring_empty = xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0; 87 + du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx); 88 + du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx); 89 + return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du); 90 + } 91 + 79 92 static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, 80 93 struct xdp_diag_req *req, 81 94 struct user_namespace *user_ns, ··· 129 116 130 117 if ((req->xdiag_show & XDP_SHOW_MEMINFO) && 131 118 sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO)) 119 + goto out_nlmsg_trim; 120 + 121 + if ((req->xdiag_show & XDP_SHOW_STATS) && 122 + xsk_diag_put_stats(xs, nlskb)) 132 123 goto out_nlmsg_trim; 133 124 134 125 mutex_unlock(&xs->mutex);
+6
net/xdp/xsk_queue.h
··· 38 38 u32 cached_cons; 39 39 struct xdp_ring *ring; 40 40 u64 invalid_descs; 41 + u64 queue_empty_descs; 41 42 }; 42 43 43 44 /* The structure of the shared state of the rings are the same as the ··· 353 352 static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q) 354 353 { 355 354 return q ? q->invalid_descs : 0; 355 + } 356 + 357 + static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q) 358 + { 359 + return q ? q->queue_empty_descs : 0; 356 360 } 357 361 358 362 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
+1 -1
samples/bpf/Makefile
··· 93 93 tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o 94 94 lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o 95 95 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o 96 - test_map_in_map-objs := bpf_load.o test_map_in_map_user.o 96 + test_map_in_map-objs := test_map_in_map_user.o 97 97 per_socket_stats_example-objs := cookie_uid_helper_example.o 98 98 xdp_redirect-objs := xdp_redirect_user.o 99 99 xdp_redirect_map-objs := xdp_redirect_map_user.o
+2 -1
samples/bpf/fds_example.c
··· 30 30 #define BPF_M_MAP 1 31 31 #define BPF_M_PROG 2 32 32 33 + char bpf_log_buf[BPF_LOG_BUF_SIZE]; 34 + 33 35 static void usage(void) 34 36 { 35 37 printf("Usage: fds_example [...]\n"); ··· 59 57 BPF_EXIT_INSN(), 60 58 }; 61 59 size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn); 62 - char bpf_log_buf[BPF_LOG_BUF_SIZE]; 63 60 struct bpf_object *obj; 64 61 int prog_fd; 65 62
+91 -85
samples/bpf/map_perf_test_kern.c
··· 9 9 #include <linux/version.h> 10 10 #include <uapi/linux/bpf.h> 11 11 #include <bpf/bpf_helpers.h> 12 - #include "bpf_legacy.h" 13 12 #include <bpf/bpf_tracing.h> 13 + #include <bpf/bpf_core_read.h> 14 + #include "trace_common.h" 14 15 15 16 #define MAX_ENTRIES 1000 16 17 #define MAX_NR_CPUS 1024 17 18 18 - struct bpf_map_def_legacy SEC("maps") hash_map = { 19 - .type = BPF_MAP_TYPE_HASH, 20 - .key_size = sizeof(u32), 21 - .value_size = sizeof(long), 22 - .max_entries = MAX_ENTRIES, 19 + struct { 20 + __uint(type, BPF_MAP_TYPE_HASH); 21 + __type(key, u32); 22 + __type(value, long); 23 + __uint(max_entries, MAX_ENTRIES); 24 + } hash_map SEC(".maps"); 25 + 26 + struct { 27 + __uint(type, BPF_MAP_TYPE_LRU_HASH); 28 + __type(key, u32); 29 + __type(value, long); 30 + __uint(max_entries, 10000); 31 + } lru_hash_map SEC(".maps"); 32 + 33 + struct { 34 + __uint(type, BPF_MAP_TYPE_LRU_HASH); 35 + __type(key, u32); 36 + __type(value, long); 37 + __uint(max_entries, 10000); 38 + __uint(map_flags, BPF_F_NO_COMMON_LRU); 39 + } nocommon_lru_hash_map SEC(".maps"); 40 + 41 + struct inner_lru { 42 + __uint(type, BPF_MAP_TYPE_LRU_HASH); 43 + __type(key, u32); 44 + __type(value, long); 45 + __uint(max_entries, MAX_ENTRIES); 46 + __uint(map_flags, BPF_F_NUMA_NODE); 47 + __uint(numa_node, 0); 48 + } inner_lru_hash_map SEC(".maps"); 49 + 50 + struct { 51 + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); 52 + __uint(max_entries, MAX_NR_CPUS); 53 + __uint(key_size, sizeof(u32)); 54 + __array(values, struct inner_lru); /* use inner_lru as inner map */ 55 + } array_of_lru_hashs SEC(".maps") = { 56 + /* statically initialize the first element */ 57 + .values = { &inner_lru_hash_map }, 23 58 }; 24 59 25 - struct bpf_map_def_legacy SEC("maps") lru_hash_map = { 26 - .type = BPF_MAP_TYPE_LRU_HASH, 27 - .key_size = sizeof(u32), 28 - .value_size = sizeof(long), 29 - .max_entries = 10000, 30 - }; 60 + struct { 61 + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 62 + __uint(key_size, sizeof(u32)); 63 + 
__uint(value_size, sizeof(long)); 64 + __uint(max_entries, MAX_ENTRIES); 65 + } percpu_hash_map SEC(".maps"); 31 66 32 - struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = { 33 - .type = BPF_MAP_TYPE_LRU_HASH, 34 - .key_size = sizeof(u32), 35 - .value_size = sizeof(long), 36 - .max_entries = 10000, 37 - .map_flags = BPF_F_NO_COMMON_LRU, 38 - }; 67 + struct { 68 + __uint(type, BPF_MAP_TYPE_HASH); 69 + __type(key, u32); 70 + __type(value, long); 71 + __uint(max_entries, MAX_ENTRIES); 72 + __uint(map_flags, BPF_F_NO_PREALLOC); 73 + } hash_map_alloc SEC(".maps"); 39 74 40 - struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = { 41 - .type = BPF_MAP_TYPE_LRU_HASH, 42 - .key_size = sizeof(u32), 43 - .value_size = sizeof(long), 44 - .max_entries = MAX_ENTRIES, 45 - .map_flags = BPF_F_NUMA_NODE, 46 - .numa_node = 0, 47 - }; 75 + struct { 76 + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 77 + __uint(key_size, sizeof(u32)); 78 + __uint(value_size, sizeof(long)); 79 + __uint(max_entries, MAX_ENTRIES); 80 + __uint(map_flags, BPF_F_NO_PREALLOC); 81 + } percpu_hash_map_alloc SEC(".maps"); 48 82 49 - struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = { 50 - .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, 51 - .key_size = sizeof(u32), 52 - .max_entries = MAX_NR_CPUS, 53 - }; 83 + struct { 84 + __uint(type, BPF_MAP_TYPE_LPM_TRIE); 85 + __uint(key_size, 8); 86 + __uint(value_size, sizeof(long)); 87 + __uint(max_entries, 10000); 88 + __uint(map_flags, BPF_F_NO_PREALLOC); 89 + } lpm_trie_map_alloc SEC(".maps"); 54 90 55 - struct bpf_map_def_legacy SEC("maps") percpu_hash_map = { 56 - .type = BPF_MAP_TYPE_PERCPU_HASH, 57 - .key_size = sizeof(u32), 58 - .value_size = sizeof(long), 59 - .max_entries = MAX_ENTRIES, 60 - }; 91 + struct { 92 + __uint(type, BPF_MAP_TYPE_ARRAY); 93 + __type(key, u32); 94 + __type(value, long); 95 + __uint(max_entries, MAX_ENTRIES); 96 + } array_map SEC(".maps"); 61 97 62 - struct bpf_map_def_legacy SEC("maps") hash_map_alloc = { 63 - .type = 
BPF_MAP_TYPE_HASH, 64 - .key_size = sizeof(u32), 65 - .value_size = sizeof(long), 66 - .max_entries = MAX_ENTRIES, 67 - .map_flags = BPF_F_NO_PREALLOC, 68 - }; 98 + struct { 99 + __uint(type, BPF_MAP_TYPE_LRU_HASH); 100 + __type(key, u32); 101 + __type(value, long); 102 + __uint(max_entries, MAX_ENTRIES); 103 + } lru_hash_lookup_map SEC(".maps"); 69 104 70 - struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = { 71 - .type = BPF_MAP_TYPE_PERCPU_HASH, 72 - .key_size = sizeof(u32), 73 - .value_size = sizeof(long), 74 - .max_entries = MAX_ENTRIES, 75 - .map_flags = BPF_F_NO_PREALLOC, 76 - }; 77 - 78 - struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = { 79 - .type = BPF_MAP_TYPE_LPM_TRIE, 80 - .key_size = 8, 81 - .value_size = sizeof(long), 82 - .max_entries = 10000, 83 - .map_flags = BPF_F_NO_PREALLOC, 84 - }; 85 - 86 - struct bpf_map_def_legacy SEC("maps") array_map = { 87 - .type = BPF_MAP_TYPE_ARRAY, 88 - .key_size = sizeof(u32), 89 - .value_size = sizeof(long), 90 - .max_entries = MAX_ENTRIES, 91 - }; 92 - 93 - struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = { 94 - .type = BPF_MAP_TYPE_LRU_HASH, 95 - .key_size = sizeof(u32), 96 - .value_size = sizeof(long), 97 - .max_entries = MAX_ENTRIES, 98 - }; 99 - 100 - SEC("kprobe/sys_getuid") 105 + SEC("kprobe/" SYSCALL(sys_getuid)) 101 106 int stress_hmap(struct pt_regs *ctx) 102 107 { 103 108 u32 key = bpf_get_current_pid_tgid(); ··· 117 112 return 0; 118 113 } 119 114 120 - SEC("kprobe/sys_geteuid") 115 + SEC("kprobe/" SYSCALL(sys_geteuid)) 121 116 int stress_percpu_hmap(struct pt_regs *ctx) 122 117 { 123 118 u32 key = bpf_get_current_pid_tgid(); ··· 131 126 return 0; 132 127 } 133 128 134 - SEC("kprobe/sys_getgid") 129 + SEC("kprobe/" SYSCALL(sys_getgid)) 135 130 int stress_hmap_alloc(struct pt_regs *ctx) 136 131 { 137 132 u32 key = bpf_get_current_pid_tgid(); ··· 145 140 return 0; 146 141 } 147 142 148 - SEC("kprobe/sys_getegid") 143 + SEC("kprobe/" SYSCALL(sys_getegid)) 149 144 int 
stress_percpu_hmap_alloc(struct pt_regs *ctx) 150 145 { 151 146 u32 key = bpf_get_current_pid_tgid(); ··· 159 154 return 0; 160 155 } 161 156 162 - SEC("kprobe/sys_connect") 157 + SEC("kprobe/" SYSCALL(sys_connect)) 163 158 int stress_lru_hmap_alloc(struct pt_regs *ctx) 164 159 { 160 + struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx); 165 161 char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%dn"; 166 162 union { 167 163 u16 dst6[8]; ··· 181 175 long val = 1; 182 176 u32 key = 0; 183 177 184 - in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx); 185 - addrlen = (int)PT_REGS_PARM3(ctx); 178 + in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs); 179 + addrlen = (int)PT_REGS_PARM3_CORE(real_regs); 186 180 187 181 if (addrlen != sizeof(*in6)) 188 182 return 0; ··· 239 233 return 0; 240 234 } 241 235 242 - SEC("kprobe/sys_gettid") 236 + SEC("kprobe/" SYSCALL(sys_gettid)) 243 237 int stress_lpm_trie_map_alloc(struct pt_regs *ctx) 244 238 { 245 239 union { ··· 261 255 return 0; 262 256 } 263 257 264 - SEC("kprobe/sys_getpgid") 258 + SEC("kprobe/" SYSCALL(sys_getpgid)) 265 259 int stress_hash_map_lookup(struct pt_regs *ctx) 266 260 { 267 261 u32 key = 1, i; ··· 274 268 return 0; 275 269 } 276 270 277 - SEC("kprobe/sys_getppid") 271 + SEC("kprobe/" SYSCALL(sys_getppid)) 278 272 int stress_array_map_lookup(struct pt_regs *ctx) 279 273 { 280 274 u32 key = 1, i;
+104 -58
samples/bpf/map_perf_test_user.c
··· 11 11 #include <sys/wait.h> 12 12 #include <stdlib.h> 13 13 #include <signal.h> 14 - #include <linux/bpf.h> 15 14 #include <string.h> 16 15 #include <time.h> 17 16 #include <sys/resource.h> ··· 18 19 #include <errno.h> 19 20 20 21 #include <bpf/bpf.h> 21 - #include "bpf_load.h" 22 + #include <bpf/libbpf.h> 22 23 23 24 #define TEST_BIT(t) (1U << (t)) 24 25 #define MAX_NR_CPUS 1024 ··· 60 61 [LRU_HASH_LOOKUP] = "lru_hash_lookup_map", 61 62 }; 62 63 64 + enum map_idx { 65 + array_of_lru_hashs_idx, 66 + hash_map_alloc_idx, 67 + lru_hash_lookup_idx, 68 + NR_IDXES, 69 + }; 70 + 71 + static int map_fd[NR_IDXES]; 72 + 63 73 static int test_flags = ~0; 64 74 static uint32_t num_map_entries; 65 75 static uint32_t inner_lru_hash_size; 66 - static int inner_lru_hash_idx = -1; 67 - static int array_of_lru_hashs_idx = -1; 68 - static int lru_hash_lookup_idx = -1; 69 76 static int lru_hash_lookup_test_entries = 32; 70 77 static uint32_t max_cnt = 1000000; 71 78 ··· 127 122 __u64 start_time; 128 123 int i, ret; 129 124 130 - if (test == INNER_LRU_HASH_PREALLOC) { 125 + if (test == INNER_LRU_HASH_PREALLOC && cpu) { 126 + /* If CPU is not 0, create inner_lru hash map and insert the fd 127 + * value into the array_of_lru_hash map. 
In case of CPU 0, 128 + * 'inner_lru_hash_map' was statically inserted on the map init 129 + */ 131 130 int outer_fd = map_fd[array_of_lru_hashs_idx]; 132 131 unsigned int mycpu, mynode; 133 132 134 133 assert(cpu < MAX_NR_CPUS); 135 134 136 - if (cpu) { 137 - ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL); 138 - assert(!ret); 135 + ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL); 136 + assert(!ret); 139 137 140 - inner_lru_map_fds[cpu] = 141 - bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, 142 - test_map_names[INNER_LRU_HASH_PREALLOC], 143 - sizeof(uint32_t), 144 - sizeof(long), 145 - inner_lru_hash_size, 0, 146 - mynode); 147 - if (inner_lru_map_fds[cpu] == -1) { 148 - printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", 149 - strerror(errno), errno); 150 - exit(1); 151 - } 152 - } else { 153 - inner_lru_map_fds[cpu] = map_fd[inner_lru_hash_idx]; 138 + inner_lru_map_fds[cpu] = 139 + bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, 140 + test_map_names[INNER_LRU_HASH_PREALLOC], 141 + sizeof(uint32_t), 142 + sizeof(long), 143 + inner_lru_hash_size, 0, 144 + mynode); 145 + if (inner_lru_map_fds[cpu] == -1) { 146 + printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", 147 + strerror(errno), errno); 148 + exit(1); 154 149 } 155 150 156 151 ret = bpf_map_update_elem(outer_fd, &cpu, ··· 382 377 key->data[1] = rand() & 0xff; 383 378 key->data[2] = rand() & 0xff; 384 379 key->data[3] = rand() & 0xff; 385 - r = bpf_map_update_elem(map_fd[6], key, &value, 0); 380 + r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], 381 + key, &value, 0); 386 382 assert(!r); 387 383 } 388 384 ··· 394 388 key->data[3] = 1; 395 389 value = 128; 396 390 397 - r = bpf_map_update_elem(map_fd[6], key, &value, 0); 391 + r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], key, &value, 0); 398 392 assert(!r); 399 393 } 400 394 401 - static void fixup_map(struct bpf_map_data *map, int idx) 395 + static void fixup_map(struct bpf_object *obj) 402 396 { 397 + struct bpf_map *map; 403 398 int i; 404 399 
405 - if (!strcmp("inner_lru_hash_map", map->name)) { 406 - inner_lru_hash_idx = idx; 407 - inner_lru_hash_size = map->def.max_entries; 408 - } 400 + bpf_object__for_each_map(map, obj) { 401 + const char *name = bpf_map__name(map); 409 402 410 - if (!strcmp("array_of_lru_hashs", map->name)) { 411 - if (inner_lru_hash_idx == -1) { 412 - printf("inner_lru_hash_map must be defined before array_of_lru_hashs\n"); 413 - exit(1); 403 + /* Only change the max_entries for the enabled test(s) */ 404 + for (i = 0; i < NR_TESTS; i++) { 405 + if (!strcmp(test_map_names[i], name) && 406 + (check_test_flags(i))) { 407 + bpf_map__resize(map, num_map_entries); 408 + continue; 409 + } 414 410 } 415 - map->def.inner_map_idx = inner_lru_hash_idx; 416 - array_of_lru_hashs_idx = idx; 417 411 } 418 - 419 - if (!strcmp("lru_hash_lookup_map", map->name)) 420 - lru_hash_lookup_idx = idx; 421 - 422 - if (num_map_entries <= 0) 423 - return; 424 412 425 413 inner_lru_hash_size = num_map_entries; 426 - 427 - /* Only change the max_entries for the enabled test(s) */ 428 - for (i = 0; i < NR_TESTS; i++) { 429 - if (!strcmp(test_map_names[i], map->name) && 430 - (check_test_flags(i))) { 431 - map->def.max_entries = num_map_entries; 432 - } 433 - } 434 414 } 435 415 436 416 int main(int argc, char **argv) 437 417 { 438 418 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; 419 + int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); 420 + struct bpf_link *links[8]; 421 + struct bpf_program *prog; 422 + struct bpf_object *obj; 423 + struct bpf_map *map; 439 424 char filename[256]; 440 - int num_cpu = 8; 425 + int i = 0; 441 426 442 - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); 443 - setrlimit(RLIMIT_MEMLOCK, &r); 427 + if (setrlimit(RLIMIT_MEMLOCK, &r)) { 428 + perror("setrlimit(RLIMIT_MEMLOCK)"); 429 + return 1; 430 + } 444 431 445 432 if (argc > 1) 446 433 test_flags = atoi(argv[1]) ? : test_flags; 447 434 448 435 if (argc > 2) 449 - num_cpu = atoi(argv[2]) ? 
: num_cpu; 436 + nr_cpus = atoi(argv[2]) ? : nr_cpus; 450 437 451 438 if (argc > 3) 452 439 num_map_entries = atoi(argv[3]); ··· 447 448 if (argc > 4) 448 449 max_cnt = atoi(argv[4]); 449 450 450 - if (load_bpf_file_fixup_map(filename, fixup_map)) { 451 - printf("%s", bpf_log_buf); 452 - return 1; 451 + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); 452 + obj = bpf_object__open_file(filename, NULL); 453 + if (libbpf_get_error(obj)) { 454 + fprintf(stderr, "ERROR: opening BPF object file failed\n"); 455 + return 0; 456 + } 457 + 458 + map = bpf_object__find_map_by_name(obj, "inner_lru_hash_map"); 459 + if (libbpf_get_error(map)) { 460 + fprintf(stderr, "ERROR: finding a map in obj file failed\n"); 461 + goto cleanup; 462 + } 463 + 464 + inner_lru_hash_size = bpf_map__max_entries(map); 465 + if (!inner_lru_hash_size) { 466 + fprintf(stderr, "ERROR: failed to get map attribute\n"); 467 + goto cleanup; 468 + } 469 + 470 + /* resize BPF map prior to loading */ 471 + if (num_map_entries > 0) 472 + fixup_map(obj); 473 + 474 + /* load BPF program */ 475 + if (bpf_object__load(obj)) { 476 + fprintf(stderr, "ERROR: loading BPF object file failed\n"); 477 + goto cleanup; 478 + } 479 + 480 + map_fd[0] = bpf_object__find_map_fd_by_name(obj, "array_of_lru_hashs"); 481 + map_fd[1] = bpf_object__find_map_fd_by_name(obj, "hash_map_alloc"); 482 + map_fd[2] = bpf_object__find_map_fd_by_name(obj, "lru_hash_lookup_map"); 483 + if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) { 484 + fprintf(stderr, "ERROR: finding a map in obj file failed\n"); 485 + goto cleanup; 486 + } 487 + 488 + bpf_object__for_each_program(prog, obj) { 489 + links[i] = bpf_program__attach(prog); 490 + if (libbpf_get_error(links[i])) { 491 + fprintf(stderr, "ERROR: bpf_program__attach failed\n"); 492 + links[i] = NULL; 493 + goto cleanup; 494 + } 495 + i++; 453 496 } 454 497 455 498 fill_lpm_trie(); 456 499 457 - run_perf_test(num_cpu); 500 + run_perf_test(nr_cpus); 458 501 502 + cleanup: 503 + for 
(i--; i >= 0; i--) 504 + bpf_link__destroy(links[i]); 505 + 506 + bpf_object__close(obj); 459 507 return 0; 460 508 }
+48 -46
samples/bpf/test_map_in_map_kern.c
··· 11 11 #include <uapi/linux/bpf.h> 12 12 #include <uapi/linux/in6.h> 13 13 #include <bpf/bpf_helpers.h> 14 - #include "bpf_legacy.h" 15 14 #include <bpf/bpf_tracing.h> 15 + #include <bpf/bpf_core_read.h> 16 + #include "trace_common.h" 16 17 17 18 #define MAX_NR_PORTS 65536 18 19 19 20 /* map #0 */ 20 - struct bpf_map_def_legacy SEC("maps") port_a = { 21 - .type = BPF_MAP_TYPE_ARRAY, 22 - .key_size = sizeof(u32), 23 - .value_size = sizeof(int), 24 - .max_entries = MAX_NR_PORTS, 25 - }; 21 + struct inner_a { 22 + __uint(type, BPF_MAP_TYPE_ARRAY); 23 + __type(key, u32); 24 + __type(value, int); 25 + __uint(max_entries, MAX_NR_PORTS); 26 + } port_a SEC(".maps"); 26 27 27 28 /* map #1 */ 28 - struct bpf_map_def_legacy SEC("maps") port_h = { 29 - .type = BPF_MAP_TYPE_HASH, 30 - .key_size = sizeof(u32), 31 - .value_size = sizeof(int), 32 - .max_entries = 1, 33 - }; 29 + struct inner_h { 30 + __uint(type, BPF_MAP_TYPE_HASH); 31 + __type(key, u32); 32 + __type(value, int); 33 + __uint(max_entries, 1); 34 + } port_h SEC(".maps"); 34 35 35 36 /* map #2 */ 36 - struct bpf_map_def_legacy SEC("maps") reg_result_h = { 37 - .type = BPF_MAP_TYPE_HASH, 38 - .key_size = sizeof(u32), 39 - .value_size = sizeof(int), 40 - .max_entries = 1, 41 - }; 37 + struct { 38 + __uint(type, BPF_MAP_TYPE_HASH); 39 + __type(key, u32); 40 + __type(value, int); 41 + __uint(max_entries, 1); 42 + } reg_result_h SEC(".maps"); 42 43 43 44 /* map #3 */ 44 - struct bpf_map_def_legacy SEC("maps") inline_result_h = { 45 - .type = BPF_MAP_TYPE_HASH, 46 - .key_size = sizeof(u32), 47 - .value_size = sizeof(int), 48 - .max_entries = 1, 49 - }; 45 + struct { 46 + __uint(type, BPF_MAP_TYPE_HASH); 47 + __type(key, u32); 48 + __type(value, int); 49 + __uint(max_entries, 1); 50 + } inline_result_h SEC(".maps"); 50 51 51 52 /* map #4 */ /* Test case #0 */ 52 - struct bpf_map_def_legacy SEC("maps") a_of_port_a = { 53 - .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, 54 - .key_size = sizeof(u32), 55 - .inner_map_idx = 0, /* 
map_fd[0] is port_a */ 56 - .max_entries = MAX_NR_PORTS, 57 - }; 53 + struct { 54 + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); 55 + __uint(max_entries, MAX_NR_PORTS); 56 + __uint(key_size, sizeof(u32)); 57 + __array(values, struct inner_a); /* use inner_a as inner map */ 58 + } a_of_port_a SEC(".maps"); 58 59 59 60 /* map #5 */ /* Test case #1 */ 60 - struct bpf_map_def_legacy SEC("maps") h_of_port_a = { 61 - .type = BPF_MAP_TYPE_HASH_OF_MAPS, 62 - .key_size = sizeof(u32), 63 - .inner_map_idx = 0, /* map_fd[0] is port_a */ 64 - .max_entries = 1, 65 - }; 61 + struct { 62 + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); 63 + __uint(max_entries, 1); 64 + __uint(key_size, sizeof(u32)); 65 + __array(values, struct inner_a); /* use inner_a as inner map */ 66 + } h_of_port_a SEC(".maps"); 66 67 67 68 /* map #6 */ /* Test case #2 */ 68 - struct bpf_map_def_legacy SEC("maps") h_of_port_h = { 69 - .type = BPF_MAP_TYPE_HASH_OF_MAPS, 70 - .key_size = sizeof(u32), 71 - .inner_map_idx = 1, /* map_fd[1] is port_h */ 72 - .max_entries = 1, 73 - }; 69 + struct { 70 + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); 71 + __uint(max_entries, 1); 72 + __uint(key_size, sizeof(u32)); 73 + __array(values, struct inner_h); /* use inner_h as inner map */ 74 + } h_of_port_h SEC(".maps"); 74 75 75 76 static __always_inline int do_reg_lookup(void *inner_map, u32 port) 76 77 { ··· 103 102 return result ? 
*result : -ENOENT; 104 103 } 105 104 106 - SEC("kprobe/sys_connect") 105 + SEC("kprobe/" SYSCALL(sys_connect)) 107 106 int trace_sys_connect(struct pt_regs *ctx) 108 107 { 108 + struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx); 109 109 struct sockaddr_in6 *in6; 110 110 u16 test_case, port, dst6[8]; 111 111 int addrlen, ret, inline_ret, ret_key = 0; ··· 114 112 void *outer_map, *inner_map; 115 113 bool inline_hash = false; 116 114 117 - in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx); 118 - addrlen = (int)PT_REGS_PARM3(ctx); 115 + in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs); 116 + addrlen = (int)PT_REGS_PARM3_CORE(real_regs); 119 117 120 118 if (addrlen != sizeof(*in6)) 121 119 return 0;
+48 -5
samples/bpf/test_map_in_map_user.c
··· 11 11 #include <stdlib.h> 12 12 #include <stdio.h> 13 13 #include <bpf/bpf.h> 14 - #include "bpf_load.h" 14 + #include <bpf/libbpf.h> 15 + 16 + static int map_fd[7]; 15 17 16 18 #define PORT_A (map_fd[0]) 17 19 #define PORT_H (map_fd[1]) ··· 115 113 int main(int argc, char **argv) 116 114 { 117 115 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; 116 + struct bpf_link *link = NULL; 117 + struct bpf_program *prog; 118 + struct bpf_object *obj; 118 119 char filename[256]; 119 120 120 - assert(!setrlimit(RLIMIT_MEMLOCK, &r)); 121 + if (setrlimit(RLIMIT_MEMLOCK, &r)) { 122 + perror("setrlimit(RLIMIT_MEMLOCK)"); 123 + return 1; 124 + } 121 125 122 126 snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); 127 + obj = bpf_object__open_file(filename, NULL); 128 + if (libbpf_get_error(obj)) { 129 + fprintf(stderr, "ERROR: opening BPF object file failed\n"); 130 + return 0; 131 + } 123 132 124 - if (load_bpf_file(filename)) { 125 - printf("%s", bpf_log_buf); 126 - return 1; 133 + prog = bpf_object__find_program_by_name(obj, "trace_sys_connect"); 134 + if (!prog) { 135 + printf("finding a prog in obj file failed\n"); 136 + goto cleanup; 137 + } 138 + 139 + /* load BPF program */ 140 + if (bpf_object__load(obj)) { 141 + fprintf(stderr, "ERROR: loading BPF object file failed\n"); 142 + goto cleanup; 143 + } 144 + 145 + map_fd[0] = bpf_object__find_map_fd_by_name(obj, "port_a"); 146 + map_fd[1] = bpf_object__find_map_fd_by_name(obj, "port_h"); 147 + map_fd[2] = bpf_object__find_map_fd_by_name(obj, "reg_result_h"); 148 + map_fd[3] = bpf_object__find_map_fd_by_name(obj, "inline_result_h"); 149 + map_fd[4] = bpf_object__find_map_fd_by_name(obj, "a_of_port_a"); 150 + map_fd[5] = bpf_object__find_map_fd_by_name(obj, "h_of_port_a"); 151 + map_fd[6] = bpf_object__find_map_fd_by_name(obj, "h_of_port_h"); 152 + if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0 || 153 + map_fd[3] < 0 || map_fd[4] < 0 || map_fd[5] < 0 || map_fd[6] < 0) { 154 + fprintf(stderr, "ERROR: finding a 
map in obj file failed\n"); 155 + goto cleanup; 156 + } 157 + 158 + link = bpf_program__attach(prog); 159 + if (libbpf_get_error(link)) { 160 + fprintf(stderr, "ERROR: bpf_program__attach failed\n"); 161 + link = NULL; 162 + goto cleanup; 127 163 } 128 164 129 165 test_map_in_map(); 130 166 167 + cleanup: 168 + bpf_link__destroy(link); 169 + bpf_object__close(obj); 131 170 return 0; 132 171 }
+6 -3
samples/bpf/test_probe_write_user_kern.c
··· 10 10 #include <linux/version.h> 11 11 #include <bpf/bpf_helpers.h> 12 12 #include <bpf/bpf_tracing.h> 13 + #include <bpf/bpf_core_read.h> 14 + #include "trace_common.h" 13 15 14 16 struct bpf_map_def SEC("maps") dnat_map = { 15 17 .type = BPF_MAP_TYPE_HASH, ··· 28 26 * This example sits on a syscall, and the syscall ABI is relatively stable 29 27 * of course, across platforms, and over time, the ABI may change. 30 28 */ 31 - SEC("kprobe/sys_connect") 29 + SEC("kprobe/" SYSCALL(sys_connect)) 32 30 int bpf_prog1(struct pt_regs *ctx) 33 31 { 32 + struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx); 33 + void *sockaddr_arg = (void *)PT_REGS_PARM2_CORE(real_regs); 34 + int sockaddr_len = (int)PT_REGS_PARM3_CORE(real_regs); 34 35 struct sockaddr_in new_addr, orig_addr = {}; 35 36 struct sockaddr_in *mapped_addr; 36 - void *sockaddr_arg = (void *)PT_REGS_PARM2(ctx); 37 - int sockaddr_len = (int)PT_REGS_PARM3(ctx); 38 37 39 38 if (sockaddr_len > sizeof(orig_addr)) 40 39 return 0;
+85 -2
samples/bpf/xdpsock_user.c
··· 77 77 static int opt_pkt_count; 78 78 static u16 opt_pkt_size = MIN_PKT_SIZE; 79 79 static u32 opt_pkt_fill_pattern = 0x12345678; 80 + static bool opt_extra_stats; 80 81 static int opt_poll; 81 82 static int opt_interval = 1; 82 83 static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP; ··· 104 103 struct xsk_socket *xsk; 105 104 unsigned long rx_npkts; 106 105 unsigned long tx_npkts; 106 + unsigned long rx_dropped_npkts; 107 + unsigned long rx_invalid_npkts; 108 + unsigned long tx_invalid_npkts; 109 + unsigned long rx_full_npkts; 110 + unsigned long rx_fill_empty_npkts; 111 + unsigned long tx_empty_npkts; 107 112 unsigned long prev_rx_npkts; 108 113 unsigned long prev_tx_npkts; 114 + unsigned long prev_rx_dropped_npkts; 115 + unsigned long prev_rx_invalid_npkts; 116 + unsigned long prev_tx_invalid_npkts; 117 + unsigned long prev_rx_full_npkts; 118 + unsigned long prev_rx_fill_empty_npkts; 119 + unsigned long prev_tx_empty_npkts; 109 120 u32 outstanding_tx; 110 121 }; 111 122 ··· 160 147 } 161 148 } 162 149 150 + static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk) 151 + { 152 + struct xdp_statistics stats; 153 + socklen_t optlen; 154 + int err; 155 + 156 + optlen = sizeof(stats); 157 + err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 158 + if (err) 159 + return err; 160 + 161 + if (optlen == sizeof(struct xdp_statistics)) { 162 + xsk->rx_dropped_npkts = stats.rx_dropped; 163 + xsk->rx_invalid_npkts = stats.rx_invalid_descs; 164 + xsk->tx_invalid_npkts = stats.tx_invalid_descs; 165 + xsk->rx_full_npkts = stats.rx_ring_full; 166 + xsk->rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs; 167 + xsk->tx_empty_npkts = stats.tx_ring_empty_descs; 168 + return 0; 169 + } 170 + 171 + return -EINVAL; 172 + } 173 + 163 174 static void dump_stats(void) 164 175 { 165 176 unsigned long now = get_nsecs(); ··· 194 157 195 158 for (i = 0; i < num_socks && xsks[i]; i++) { 196 159 char *fmt = "%-15s %'-11.0f %'-11lu\n"; 197 - double rx_pps, tx_pps; 160 
+ double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps, 161 + tx_invalid_pps, tx_empty_pps; 198 162 199 163 rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) * 200 164 1000000000. / dt; ··· 213 175 214 176 xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts; 215 177 xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts; 178 + 179 + if (opt_extra_stats) { 180 + if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) { 181 + dropped_pps = (xsks[i]->rx_dropped_npkts - 182 + xsks[i]->prev_rx_dropped_npkts) * 1000000000. / dt; 183 + rx_invalid_pps = (xsks[i]->rx_invalid_npkts - 184 + xsks[i]->prev_rx_invalid_npkts) * 1000000000. / dt; 185 + tx_invalid_pps = (xsks[i]->tx_invalid_npkts - 186 + xsks[i]->prev_tx_invalid_npkts) * 1000000000. / dt; 187 + full_pps = (xsks[i]->rx_full_npkts - 188 + xsks[i]->prev_rx_full_npkts) * 1000000000. / dt; 189 + fill_empty_pps = (xsks[i]->rx_fill_empty_npkts - 190 + xsks[i]->prev_rx_fill_empty_npkts) 191 + * 1000000000. / dt; 192 + tx_empty_pps = (xsks[i]->tx_empty_npkts - 193 + xsks[i]->prev_tx_empty_npkts) * 1000000000. 
/ dt; 194 + 195 + printf(fmt, "rx dropped", dropped_pps, 196 + xsks[i]->rx_dropped_npkts); 197 + printf(fmt, "rx invalid", rx_invalid_pps, 198 + xsks[i]->rx_invalid_npkts); 199 + printf(fmt, "tx invalid", tx_invalid_pps, 200 + xsks[i]->tx_invalid_npkts); 201 + printf(fmt, "rx queue full", full_pps, 202 + xsks[i]->rx_full_npkts); 203 + printf(fmt, "fill ring empty", fill_empty_pps, 204 + xsks[i]->rx_fill_empty_npkts); 205 + printf(fmt, "tx ring empty", tx_empty_pps, 206 + xsks[i]->tx_empty_npkts); 207 + 208 + xsks[i]->prev_rx_dropped_npkts = xsks[i]->rx_dropped_npkts; 209 + xsks[i]->prev_rx_invalid_npkts = xsks[i]->rx_invalid_npkts; 210 + xsks[i]->prev_tx_invalid_npkts = xsks[i]->tx_invalid_npkts; 211 + xsks[i]->prev_rx_full_npkts = xsks[i]->rx_full_npkts; 212 + xsks[i]->prev_rx_fill_empty_npkts = xsks[i]->rx_fill_empty_npkts; 213 + xsks[i]->prev_tx_empty_npkts = xsks[i]->tx_empty_npkts; 214 + } else { 215 + printf("%-15s\n", "Error retrieving extra stats"); 216 + } 217 + } 216 218 } 217 219 } 218 220 ··· 708 630 {"tx-pkt-count", required_argument, 0, 'C'}, 709 631 {"tx-pkt-size", required_argument, 0, 's'}, 710 632 {"tx-pkt-pattern", required_argument, 0, 'P'}, 633 + {"extra-stats", no_argument, 0, 'x'}, 711 634 {0, 0, 0, 0} 712 635 }; 713 636 ··· 743 664 " (Default: %d bytes)\n" 744 665 " Min size: %d, Max size %d.\n" 745 666 " -P, --tx-pkt-pattern=nPacket fill pattern. 
Default: 0x%x\n" 667 + " -x, --extra-stats Display extra statistics.\n" 746 668 "\n"; 747 669 fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE, 748 670 opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE, ··· 759 679 opterr = 0; 760 680 761 681 for (;;) { 762 - c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:", 682 + c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:x", 763 683 long_options, &option_index); 764 684 if (c == -1) 765 685 break; ··· 839 759 break; 840 760 case 'P': 841 761 opt_pkt_fill_pattern = strtol(optarg, NULL, 16); 762 + break; 763 + case 'x': 764 + opt_extra_stats = 1; 842 765 break; 843 766 default: 844 767 usage(basename(argv[0]));
+3
tools/Makefile
··· 67 67 cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE 68 68 $(call descend,$@) 69 69 70 + bpf/%: FORCE 71 + $(call descend,$@) 72 + 70 73 liblockdep: FORCE 71 74 $(call descend,lib/lockdep) 72 75
+8 -1
tools/bpf/Makefile
··· 123 123 runqslower_clean: 124 124 $(call descend,runqslower,clean) 125 125 126 + resolve_btfids: 127 + $(call descend,resolve_btfids) 128 + 129 + resolve_btfids_clean: 130 + $(call descend,resolve_btfids,clean) 131 + 126 132 .PHONY: all install clean bpftool bpftool_install bpftool_clean \ 127 - runqslower runqslower_install runqslower_clean 133 + runqslower runqslower_install runqslower_clean \ 134 + resolve_btfids resolve_btfids_clean
+1
tools/bpf/bpftool/common.c
··· 33 33 [BPF_CGROUP_INET_INGRESS] = "ingress", 34 34 [BPF_CGROUP_INET_EGRESS] = "egress", 35 35 [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create", 36 + [BPF_CGROUP_INET_SOCK_RELEASE] = "sock_release", 36 37 [BPF_CGROUP_SOCK_OPS] = "sock_ops", 37 38 [BPF_CGROUP_DEVICE] = "device", 38 39 [BPF_CGROUP_INET4_BIND] = "bind4",
+10 -13
tools/bpf/bpftool/gen.c
··· 88 88 return NULL; 89 89 } 90 90 91 - static void codegen_btf_dump_printf(void *ct, const char *fmt, va_list args) 91 + static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args) 92 92 { 93 93 vprintf(fmt, args); 94 94 } ··· 104 104 int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec); 105 105 const char *sec_ident; 106 106 char var_ident[256]; 107 + bool strip_mods = false; 107 108 108 - if (strcmp(sec_name, ".data") == 0) 109 + if (strcmp(sec_name, ".data") == 0) { 109 110 sec_ident = "data"; 110 - else if (strcmp(sec_name, ".bss") == 0) 111 + } else if (strcmp(sec_name, ".bss") == 0) { 111 112 sec_ident = "bss"; 112 - else if (strcmp(sec_name, ".rodata") == 0) 113 + } else if (strcmp(sec_name, ".rodata") == 0) { 113 114 sec_ident = "rodata"; 114 - else if (strcmp(sec_name, ".kconfig") == 0) 115 + strip_mods = true; 116 + } else if (strcmp(sec_name, ".kconfig") == 0) { 115 117 sec_ident = "kconfig"; 116 - else 118 + } else { 117 119 return 0; 120 + } 118 121 119 122 printf(" struct %s__%s {\n", obj_name, sec_ident); 120 123 for (i = 0; i < vlen; i++, sec_var++) { ··· 126 123 DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, 127 124 .field_name = var_ident, 128 125 .indent_level = 2, 126 + .strip_mods = strip_mods, 129 127 ); 130 128 int need_off = sec_var->offset, align_off, align; 131 129 __u32 var_type_id = var->type; 132 - const struct btf_type *t; 133 - 134 - t = btf__type_by_id(btf, var_type_id); 135 - while (btf_is_mod(t)) { 136 - var_type_id = t->type; 137 - t = btf__type_by_id(btf, var_type_id); 138 - } 139 130 140 131 if (off > need_off) { 141 132 p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
+1 -1
tools/bpf/bpftool/pids.c
··· 15 15 16 16 int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) 17 17 { 18 - p_err("bpftool built without PID iterator support"); 19 18 return -ENOTSUP; 20 19 } 21 20 void delete_obj_refs_table(struct obj_refs_table *table) {} 22 21 void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {} 22 + void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {} 23 23 24 24 #else /* BPFTOOL_WITHOUT_SKELETONS */ 25 25
+10
tools/bpf/resolve_btfids/Build
··· 1 + resolve_btfids-y += main.o 2 + resolve_btfids-y += rbtree.o 3 + resolve_btfids-y += zalloc.o 4 + resolve_btfids-y += string.o 5 + resolve_btfids-y += ctype.o 6 + resolve_btfids-y += str_error_r.o 7 + 8 + $(OUTPUT)%.o: ../../lib/%.c FORCE 9 + $(call rule_mkdir) 10 + $(call if_changed_dep,cc_o_c)
+77
tools/bpf/resolve_btfids/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + include ../../scripts/Makefile.include 3 + 4 + ifeq ($(srctree),) 5 + srctree := $(patsubst %/,%,$(dir $(CURDIR))) 6 + srctree := $(patsubst %/,%,$(dir $(srctree))) 7 + srctree := $(patsubst %/,%,$(dir $(srctree))) 8 + endif 9 + 10 + ifeq ($(V),1) 11 + Q = 12 + msg = 13 + else 14 + Q = @ 15 + msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))"; 16 + MAKEFLAGS=--no-print-directory 17 + endif 18 + 19 + OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/ 20 + 21 + LIBBPF_SRC := $(srctree)/tools/lib/bpf/ 22 + SUBCMD_SRC := $(srctree)/tools/lib/subcmd/ 23 + 24 + BPFOBJ := $(OUTPUT)/libbpf.a 25 + SUBCMDOBJ := $(OUTPUT)/libsubcmd.a 26 + 27 + BINARY := $(OUTPUT)/resolve_btfids 28 + BINARY_IN := $(BINARY)-in.o 29 + 30 + all: $(BINARY) 31 + 32 + $(OUTPUT): 33 + $(call msg,MKDIR,,$@) 34 + $(Q)mkdir -p $(OUTPUT) 35 + 36 + $(SUBCMDOBJ): fixdep FORCE 37 + $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) 38 + 39 + $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT) 40 + $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) 41 + 42 + CFLAGS := -g \ 43 + -I$(srctree)/tools/include \ 44 + -I$(srctree)/tools/include/uapi \ 45 + -I$(LIBBPF_SRC) \ 46 + -I$(SUBCMD_SRC) 47 + 48 + LIBS = -lelf -lz 49 + 50 + export srctree OUTPUT CFLAGS Q 51 + include $(srctree)/tools/build/Makefile.include 52 + 53 + $(BINARY_IN): fixdep FORCE 54 + $(Q)$(MAKE) $(build)=resolve_btfids 55 + 56 + $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN) 57 + $(call msg,LINK,$@) 58 + $(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS) 59 + 60 + libsubcmd-clean: 61 + $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) clean 62 + 63 + libbpf-clean: 64 + $(Q)$(MAKE) -C $(LIBBPF_SRC) OUTPUT=$(OUTPUT) clean 65 + 66 + clean: libsubcmd-clean libbpf-clean fixdep-clean 67 + $(call msg,CLEAN,$(BINARY)) 68 + $(Q)$(RM) -f $(BINARY); \ 69 + find $(if $(OUTPUT),$(OUTPUT),.) 
-name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM) 70 + 71 + tags: 72 + $(call msg,GEN,,tags) 73 + $(Q)ctags -R . $(LIBBPF_SRC) $(SUBCMD_SRC) 74 + 75 + FORCE: 76 + 77 + .PHONY: all FORCE clean tags
+721
tools/bpf/resolve_btfids/main.c
··· 1 + // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 + 3 + /* 4 + * resolve_btfids scans Elf object for .BTF_ids section and resolves 5 + * its symbols with BTF ID values. 6 + * 7 + * Each symbol points to 4 bytes data and is expected to have 8 + * following name syntax: 9 + * 10 + * __BTF_ID__<type>__<symbol>[__<id>] 11 + * 12 + * type is: 13 + * 14 + * func - lookup BTF_KIND_FUNC symbol with <symbol> name 15 + * and store its ID into the data: 16 + * 17 + * __BTF_ID__func__vfs_close__1: 18 + * .zero 4 19 + * 20 + * struct - lookup BTF_KIND_STRUCT symbol with <symbol> name 21 + * and store its ID into the data: 22 + * 23 + * __BTF_ID__struct__sk_buff__1: 24 + * .zero 4 25 + * 26 + * union - lookup BTF_KIND_UNION symbol with <symbol> name 27 + * and store its ID into the data: 28 + * 29 + * __BTF_ID__union__thread_union__1: 30 + * .zero 4 31 + * 32 + * typedef - lookup BTF_KIND_TYPEDEF symbol with <symbol> name 33 + * and store its ID into the data: 34 + * 35 + * __BTF_ID__typedef__pid_t__1: 36 + * .zero 4 37 + * 38 + * set - store symbol size into first 4 bytes and sort following 39 + * ID list 40 + * 41 + * __BTF_ID__set__list: 42 + * .zero 4 43 + * list: 44 + * __BTF_ID__func__vfs_getattr__3: 45 + * .zero 4 46 + * __BTF_ID__func__vfs_fallocate__4: 47 + * .zero 4 48 + */ 49 + 50 + #define _GNU_SOURCE 51 + #include <stdio.h> 52 + #include <string.h> 53 + #include <unistd.h> 54 + #include <stdlib.h> 55 + #include <libelf.h> 56 + #include <gelf.h> 57 + #include <sys/stat.h> 58 + #include <fcntl.h> 59 + #include <errno.h> 60 + #include <linux/rbtree.h> 61 + #include <linux/zalloc.h> 62 + #include <linux/err.h> 63 + #include <btf.h> 64 + #include <libbpf.h> 65 + #include <parse-options.h> 66 + 67 + #define BTF_IDS_SECTION ".BTF_ids" 68 + #define BTF_ID "__BTF_ID__" 69 + 70 + #define BTF_STRUCT "struct" 71 + #define BTF_UNION "union" 72 + #define BTF_TYPEDEF "typedef" 73 + #define BTF_FUNC "func" 74 + #define BTF_SET "set" 75 + 76 + #define ADDR_CNT 100 77 
+ 78 + struct btf_id { 79 + struct rb_node rb_node; 80 + char *name; 81 + union { 82 + int id; 83 + int cnt; 84 + }; 85 + int addr_cnt; 86 + Elf64_Addr addr[ADDR_CNT]; 87 + }; 88 + 89 + struct object { 90 + const char *path; 91 + const char *btf; 92 + 93 + struct { 94 + int fd; 95 + Elf *elf; 96 + Elf_Data *symbols; 97 + Elf_Data *idlist; 98 + int symbols_shndx; 99 + int idlist_shndx; 100 + size_t strtabidx; 101 + unsigned long idlist_addr; 102 + } efile; 103 + 104 + struct rb_root sets; 105 + struct rb_root structs; 106 + struct rb_root unions; 107 + struct rb_root typedefs; 108 + struct rb_root funcs; 109 + 110 + int nr_funcs; 111 + int nr_structs; 112 + int nr_unions; 113 + int nr_typedefs; 114 + }; 115 + 116 + static int verbose; 117 + 118 + int eprintf(int level, int var, const char *fmt, ...) 119 + { 120 + va_list args; 121 + int ret; 122 + 123 + if (var >= level) { 124 + va_start(args, fmt); 125 + ret = vfprintf(stderr, fmt, args); 126 + va_end(args); 127 + } 128 + return ret; 129 + } 130 + 131 + #ifndef pr_fmt 132 + #define pr_fmt(fmt) fmt 133 + #endif 134 + 135 + #define pr_debug(fmt, ...) \ 136 + eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__) 137 + #define pr_debugN(n, fmt, ...) \ 138 + eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__) 139 + #define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__) 140 + #define pr_err(fmt, ...) 
\ 141 + eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__) 142 + 143 + static bool is_btf_id(const char *name) 144 + { 145 + return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1); 146 + } 147 + 148 + static struct btf_id *btf_id__find(struct rb_root *root, const char *name) 149 + { 150 + struct rb_node *p = root->rb_node; 151 + struct btf_id *id; 152 + int cmp; 153 + 154 + while (p) { 155 + id = rb_entry(p, struct btf_id, rb_node); 156 + cmp = strcmp(id->name, name); 157 + if (cmp < 0) 158 + p = p->rb_left; 159 + else if (cmp > 0) 160 + p = p->rb_right; 161 + else 162 + return id; 163 + } 164 + return NULL; 165 + } 166 + 167 + static struct btf_id* 168 + btf_id__add(struct rb_root *root, char *name, bool unique) 169 + { 170 + struct rb_node **p = &root->rb_node; 171 + struct rb_node *parent = NULL; 172 + struct btf_id *id; 173 + int cmp; 174 + 175 + while (*p != NULL) { 176 + parent = *p; 177 + id = rb_entry(parent, struct btf_id, rb_node); 178 + cmp = strcmp(id->name, name); 179 + if (cmp < 0) 180 + p = &(*p)->rb_left; 181 + else if (cmp > 0) 182 + p = &(*p)->rb_right; 183 + else 184 + return unique ? 
NULL : id; 185 + } 186 + 187 + id = zalloc(sizeof(*id)); 188 + if (id) { 189 + pr_debug("adding symbol %s\n", name); 190 + id->name = name; 191 + rb_link_node(&id->rb_node, parent, p); 192 + rb_insert_color(&id->rb_node, root); 193 + } 194 + return id; 195 + } 196 + 197 + static char *get_id(const char *prefix_end) 198 + { 199 + /* 200 + * __BTF_ID__func__vfs_truncate__0 201 + * prefix_end = ^ 202 + */ 203 + char *p, *id = strdup(prefix_end + sizeof("__") - 1); 204 + 205 + if (id) { 206 + /* 207 + * __BTF_ID__func__vfs_truncate__0 208 + * id = ^ 209 + * 210 + * cut the unique id part 211 + */ 212 + p = strrchr(id, '_'); 213 + p--; 214 + if (*p != '_') { 215 + free(id); 216 + return NULL; 217 + } 218 + *p = '\0'; 219 + } 220 + return id; 221 + } 222 + 223 + static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size) 224 + { 225 + char *id; 226 + 227 + id = get_id(name + size); 228 + if (!id) { 229 + pr_err("FAILED to parse symbol name: %s\n", name); 230 + return NULL; 231 + } 232 + 233 + return btf_id__add(root, id, false); 234 + } 235 + 236 + static int elf_collect(struct object *obj) 237 + { 238 + Elf_Scn *scn = NULL; 239 + size_t shdrstrndx; 240 + int idx = 0; 241 + Elf *elf; 242 + int fd; 243 + 244 + fd = open(obj->path, O_RDWR, 0666); 245 + if (fd == -1) { 246 + pr_err("FAILED cannot open %s: %s\n", 247 + obj->path, strerror(errno)); 248 + return -1; 249 + } 250 + 251 + elf_version(EV_CURRENT); 252 + 253 + elf = elf_begin(fd, ELF_C_RDWR_MMAP, NULL); 254 + if (!elf) { 255 + pr_err("FAILED cannot create ELF descriptor: %s\n", 256 + elf_errmsg(-1)); 257 + return -1; 258 + } 259 + 260 + obj->efile.fd = fd; 261 + obj->efile.elf = elf; 262 + 263 + elf_flagelf(elf, ELF_C_SET, ELF_F_LAYOUT); 264 + 265 + if (elf_getshdrstrndx(elf, &shdrstrndx) != 0) { 266 + pr_err("FAILED cannot get shdr str ndx\n"); 267 + return -1; 268 + } 269 + 270 + /* 271 + * Scan all the elf sections and look for save data 272 + * from .BTF_ids section and symbols. 
273 + */ 274 + while ((scn = elf_nextscn(elf, scn)) != NULL) { 275 + Elf_Data *data; 276 + GElf_Shdr sh; 277 + char *name; 278 + 279 + idx++; 280 + if (gelf_getshdr(scn, &sh) != &sh) { 281 + pr_err("FAILED get section(%d) header\n", idx); 282 + return -1; 283 + } 284 + 285 + name = elf_strptr(elf, shdrstrndx, sh.sh_name); 286 + if (!name) { 287 + pr_err("FAILED get section(%d) name\n", idx); 288 + return -1; 289 + } 290 + 291 + data = elf_getdata(scn, 0); 292 + if (!data) { 293 + pr_err("FAILED to get section(%d) data from %s\n", 294 + idx, name); 295 + return -1; 296 + } 297 + 298 + pr_debug2("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", 299 + idx, name, (unsigned long) data->d_size, 300 + (int) sh.sh_link, (unsigned long) sh.sh_flags, 301 + (int) sh.sh_type); 302 + 303 + if (sh.sh_type == SHT_SYMTAB) { 304 + obj->efile.symbols = data; 305 + obj->efile.symbols_shndx = idx; 306 + obj->efile.strtabidx = sh.sh_link; 307 + } else if (!strcmp(name, BTF_IDS_SECTION)) { 308 + obj->efile.idlist = data; 309 + obj->efile.idlist_shndx = idx; 310 + obj->efile.idlist_addr = sh.sh_addr; 311 + } 312 + } 313 + 314 + return 0; 315 + } 316 + 317 + static int symbols_collect(struct object *obj) 318 + { 319 + Elf_Scn *scn = NULL; 320 + int n, i, err = 0; 321 + GElf_Shdr sh; 322 + char *name; 323 + 324 + scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx); 325 + if (!scn) 326 + return -1; 327 + 328 + if (gelf_getshdr(scn, &sh) != &sh) 329 + return -1; 330 + 331 + n = sh.sh_size / sh.sh_entsize; 332 + 333 + /* 334 + * Scan symbols and look for the ones starting with 335 + * __BTF_ID__* over .BTF_ids section. 
336 + */ 337 + for (i = 0; !err && i < n; i++) { 338 + char *tmp, *prefix; 339 + struct btf_id *id; 340 + GElf_Sym sym; 341 + int err = -1; 342 + 343 + if (!gelf_getsym(obj->efile.symbols, i, &sym)) 344 + return -1; 345 + 346 + if (sym.st_shndx != obj->efile.idlist_shndx) 347 + continue; 348 + 349 + name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, 350 + sym.st_name); 351 + 352 + if (!is_btf_id(name)) 353 + continue; 354 + 355 + /* 356 + * __BTF_ID__TYPE__vfs_truncate__0 357 + * prefix = ^ 358 + */ 359 + prefix = name + sizeof(BTF_ID) - 1; 360 + 361 + /* struct */ 362 + if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) { 363 + obj->nr_structs++; 364 + id = add_symbol(&obj->structs, prefix, sizeof(BTF_STRUCT) - 1); 365 + /* union */ 366 + } else if (!strncmp(prefix, BTF_UNION, sizeof(BTF_UNION) - 1)) { 367 + obj->nr_unions++; 368 + id = add_symbol(&obj->unions, prefix, sizeof(BTF_UNION) - 1); 369 + /* typedef */ 370 + } else if (!strncmp(prefix, BTF_TYPEDEF, sizeof(BTF_TYPEDEF) - 1)) { 371 + obj->nr_typedefs++; 372 + id = add_symbol(&obj->typedefs, prefix, sizeof(BTF_TYPEDEF) - 1); 373 + /* func */ 374 + } else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) { 375 + obj->nr_funcs++; 376 + id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1); 377 + /* set */ 378 + } else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) { 379 + id = add_symbol(&obj->sets, prefix, sizeof(BTF_SET) - 1); 380 + /* 381 + * SET objects store list's count, which is encoded 382 + * in symbol's size, together with 'cnt' field hence 383 + * that - 1. 
384 + */ 385 + if (id) 386 + id->cnt = sym.st_size / sizeof(int) - 1; 387 + } else { 388 + pr_err("FAILED unsupported prefix %s\n", prefix); 389 + return -1; 390 + } 391 + 392 + if (!id) 393 + return -ENOMEM; 394 + 395 + if (id->addr_cnt >= ADDR_CNT) { 396 + pr_err("FAILED symbol %s crossed the number of allowed lists", 397 + id->name); 398 + return -1; 399 + } 400 + id->addr[id->addr_cnt++] = sym.st_value; 401 + } 402 + 403 + return 0; 404 + } 405 + 406 + static struct btf *btf__parse_raw(const char *file) 407 + { 408 + struct btf *btf; 409 + struct stat st; 410 + __u8 *buf; 411 + FILE *f; 412 + 413 + if (stat(file, &st)) 414 + return NULL; 415 + 416 + f = fopen(file, "rb"); 417 + if (!f) 418 + return NULL; 419 + 420 + buf = malloc(st.st_size); 421 + if (!buf) { 422 + btf = ERR_PTR(-ENOMEM); 423 + goto exit_close; 424 + } 425 + 426 + if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) { 427 + btf = ERR_PTR(-EINVAL); 428 + goto exit_free; 429 + } 430 + 431 + btf = btf__new(buf, st.st_size); 432 + 433 + exit_free: 434 + free(buf); 435 + exit_close: 436 + fclose(f); 437 + return btf; 438 + } 439 + 440 + static bool is_btf_raw(const char *file) 441 + { 442 + __u16 magic = 0; 443 + int fd, nb_read; 444 + 445 + fd = open(file, O_RDONLY); 446 + if (fd < 0) 447 + return false; 448 + 449 + nb_read = read(fd, &magic, sizeof(magic)); 450 + close(fd); 451 + return nb_read == sizeof(magic) && magic == BTF_MAGIC; 452 + } 453 + 454 + static struct btf *btf_open(const char *path) 455 + { 456 + if (is_btf_raw(path)) 457 + return btf__parse_raw(path); 458 + else 459 + return btf__parse_elf(path, NULL); 460 + } 461 + 462 + static int symbols_resolve(struct object *obj) 463 + { 464 + int nr_typedefs = obj->nr_typedefs; 465 + int nr_structs = obj->nr_structs; 466 + int nr_unions = obj->nr_unions; 467 + int nr_funcs = obj->nr_funcs; 468 + int err, type_id; 469 + struct btf *btf; 470 + __u32 nr; 471 + 472 + btf = btf_open(obj->btf ?: obj->path); 473 + err = libbpf_get_error(btf); 
474 + if (err) { 475 + pr_err("FAILED: load BTF from %s: %s", 476 + obj->path, strerror(err)); 477 + return -1; 478 + } 479 + 480 + err = -1; 481 + nr = btf__get_nr_types(btf); 482 + 483 + /* 484 + * Iterate all the BTF types and search for collected symbol IDs. 485 + */ 486 + for (type_id = 1; type_id <= nr; type_id++) { 487 + const struct btf_type *type; 488 + struct rb_root *root; 489 + struct btf_id *id; 490 + const char *str; 491 + int *nr; 492 + 493 + type = btf__type_by_id(btf, type_id); 494 + if (!type) { 495 + pr_err("FAILED: malformed BTF, can't resolve type for ID %d\n", 496 + type_id); 497 + goto out; 498 + } 499 + 500 + if (btf_is_func(type) && nr_funcs) { 501 + nr = &nr_funcs; 502 + root = &obj->funcs; 503 + } else if (btf_is_struct(type) && nr_structs) { 504 + nr = &nr_structs; 505 + root = &obj->structs; 506 + } else if (btf_is_union(type) && nr_unions) { 507 + nr = &nr_unions; 508 + root = &obj->unions; 509 + } else if (btf_is_typedef(type) && nr_typedefs) { 510 + nr = &nr_typedefs; 511 + root = &obj->typedefs; 512 + } else 513 + continue; 514 + 515 + str = btf__name_by_offset(btf, type->name_off); 516 + if (!str) { 517 + pr_err("FAILED: malformed BTF, can't resolve name for ID %d\n", 518 + type_id); 519 + goto out; 520 + } 521 + 522 + id = btf_id__find(root, str); 523 + if (id) { 524 + id->id = type_id; 525 + (*nr)--; 526 + } 527 + } 528 + 529 + err = 0; 530 + out: 531 + btf__free(btf); 532 + return err; 533 + } 534 + 535 + static int id_patch(struct object *obj, struct btf_id *id) 536 + { 537 + Elf_Data *data = obj->efile.idlist; 538 + int *ptr = data->d_buf; 539 + int i; 540 + 541 + if (!id->id) { 542 + pr_err("FAILED unresolved symbol %s\n", id->name); 543 + return -EINVAL; 544 + } 545 + 546 + for (i = 0; i < id->addr_cnt; i++) { 547 + unsigned long addr = id->addr[i]; 548 + unsigned long idx = addr - obj->efile.idlist_addr; 549 + 550 + pr_debug("patching addr %5lu: ID %7d [%s]\n", 551 + idx, id->id, id->name); 552 + 553 + if (idx >= 
data->d_size) { 554 + pr_err("FAILED patching index %lu out of bounds %lu\n", 555 + idx, data->d_size); 556 + return -1; 557 + } 558 + 559 + idx = idx / sizeof(int); 560 + ptr[idx] = id->id; 561 + } 562 + 563 + return 0; 564 + } 565 + 566 + static int __symbols_patch(struct object *obj, struct rb_root *root) 567 + { 568 + struct rb_node *next; 569 + struct btf_id *id; 570 + 571 + next = rb_first(root); 572 + while (next) { 573 + id = rb_entry(next, struct btf_id, rb_node); 574 + 575 + if (id_patch(obj, id)) 576 + return -1; 577 + 578 + next = rb_next(next); 579 + } 580 + return 0; 581 + } 582 + 583 + static int cmp_id(const void *pa, const void *pb) 584 + { 585 + const int *a = pa, *b = pb; 586 + 587 + return *a - *b; 588 + } 589 + 590 + static int sets_patch(struct object *obj) 591 + { 592 + Elf_Data *data = obj->efile.idlist; 593 + int *ptr = data->d_buf; 594 + struct rb_node *next; 595 + 596 + next = rb_first(&obj->sets); 597 + while (next) { 598 + unsigned long addr, idx; 599 + struct btf_id *id; 600 + int *base; 601 + int cnt; 602 + 603 + id = rb_entry(next, struct btf_id, rb_node); 604 + addr = id->addr[0]; 605 + idx = addr - obj->efile.idlist_addr; 606 + 607 + /* sets are unique */ 608 + if (id->addr_cnt != 1) { 609 + pr_err("FAILED malformed data for set '%s'\n", 610 + id->name); 611 + return -1; 612 + } 613 + 614 + idx = idx / sizeof(int); 615 + base = &ptr[idx] + 1; 616 + cnt = ptr[idx]; 617 + 618 + pr_debug("sorting addr %5lu: cnt %6d [%s]\n", 619 + (idx + 1) * sizeof(int), cnt, id->name); 620 + 621 + qsort(base, cnt, sizeof(int), cmp_id); 622 + 623 + next = rb_next(next); 624 + } 625 + } 626 + 627 + static int symbols_patch(struct object *obj) 628 + { 629 + int err; 630 + 631 + if (__symbols_patch(obj, &obj->structs) || 632 + __symbols_patch(obj, &obj->unions) || 633 + __symbols_patch(obj, &obj->typedefs) || 634 + __symbols_patch(obj, &obj->funcs) || 635 + __symbols_patch(obj, &obj->sets)) 636 + return -1; 637 + 638 + if (sets_patch(obj)) 639 + return 
-1; 640 + 641 + elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY); 642 + 643 + err = elf_update(obj->efile.elf, ELF_C_WRITE); 644 + if (err < 0) { 645 + pr_err("FAILED elf_update(WRITE): %s\n", 646 + elf_errmsg(-1)); 647 + } 648 + 649 + pr_debug("update %s for %s\n", 650 + err >= 0 ? "ok" : "failed", obj->path); 651 + return err < 0 ? -1 : 0; 652 + } 653 + 654 + static const char * const resolve_btfids_usage[] = { 655 + "resolve_btfids [<options>] <ELF object>", 656 + NULL 657 + }; 658 + 659 + int main(int argc, const char **argv) 660 + { 661 + bool no_fail = false; 662 + struct object obj = { 663 + .efile = { 664 + .idlist_shndx = -1, 665 + .symbols_shndx = -1, 666 + }, 667 + .structs = RB_ROOT, 668 + .unions = RB_ROOT, 669 + .typedefs = RB_ROOT, 670 + .funcs = RB_ROOT, 671 + .sets = RB_ROOT, 672 + }; 673 + struct option btfid_options[] = { 674 + OPT_INCR('v', "verbose", &verbose, 675 + "be more verbose (show errors, etc)"), 676 + OPT_STRING(0, "btf", &obj.btf, "BTF data", 677 + "BTF data"), 678 + OPT_BOOLEAN(0, "no-fail", &no_fail, 679 + "do not fail if " BTF_IDS_SECTION " section is not found"), 680 + OPT_END() 681 + }; 682 + int err = -1; 683 + 684 + argc = parse_options(argc, argv, btfid_options, resolve_btfids_usage, 685 + PARSE_OPT_STOP_AT_NON_OPTION); 686 + if (argc != 1) 687 + usage_with_options(resolve_btfids_usage, btfid_options); 688 + 689 + obj.path = argv[0]; 690 + 691 + if (elf_collect(&obj)) 692 + goto out; 693 + 694 + /* 695 + * We did not find .BTF_ids section or symbols section, 696 + * nothing to do.. 
697 + */ 698 + if (obj.efile.idlist_shndx == -1 || 699 + obj.efile.symbols_shndx == -1) { 700 + if (no_fail) 701 + return 0; 702 + pr_err("FAILED to find needed sections\n"); 703 + return -1; 704 + } 705 + 706 + if (symbols_collect(&obj)) 707 + goto out; 708 + 709 + if (symbols_resolve(&obj)) 710 + goto out; 711 + 712 + if (symbols_patch(&obj)) 713 + goto out; 714 + 715 + err = 0; 716 + out: 717 + if (obj.efile.elf) 718 + elf_end(obj.efile.elf); 719 + close(obj.efile.fd); 720 + return err; 721 + }
+87
tools/include/linux/btf_ids.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _LINUX_BTF_IDS_H 4 + #define _LINUX_BTF_IDS_H 5 + 6 + #include <linux/compiler.h> /* for __PASTE */ 7 + 8 + /* 9 + * Following macros help to define lists of BTF IDs placed 10 + * in .BTF_ids section. They are initially filled with zeros 11 + * (during compilation) and resolved later during the 12 + * linking phase by resolve_btfids tool. 13 + * 14 + * Any change in list layout must be reflected in resolve_btfids 15 + * tool logic. 16 + */ 17 + 18 + #define BTF_IDS_SECTION ".BTF_ids" 19 + 20 + #define ____BTF_ID(symbol) \ 21 + asm( \ 22 + ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ 23 + ".local " #symbol " ; \n" \ 24 + ".type " #symbol ", @object; \n" \ 25 + ".size " #symbol ", 4; \n" \ 26 + #symbol ": \n" \ 27 + ".zero 4 \n" \ 28 + ".popsection; \n"); 29 + 30 + #define __BTF_ID(symbol) \ 31 + ____BTF_ID(symbol) 32 + 33 + #define __ID(prefix) \ 34 + __PASTE(prefix, __COUNTER__) 35 + 36 + /* 37 + * The BTF_ID defines unique symbol for each ID pointing 38 + * to 4 zero bytes. 39 + */ 40 + #define BTF_ID(prefix, name) \ 41 + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) 42 + 43 + /* 44 + * The BTF_ID_LIST macro defines pure (unsorted) list 45 + * of BTF IDs, with following layout: 46 + * 47 + * BTF_ID_LIST(list1) 48 + * BTF_ID(type1, name1) 49 + * BTF_ID(type2, name2) 50 + * 51 + * list1: 52 + * __BTF_ID__type1__name1__1: 53 + * .zero 4 54 + * __BTF_ID__type2__name2__2: 55 + * .zero 4 56 + * 57 + */ 58 + #define __BTF_ID_LIST(name) \ 59 + asm( \ 60 + ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ 61 + ".local " #name "; \n" \ 62 + #name ":; \n" \ 63 + ".popsection; \n"); \ 64 + 65 + #define BTF_ID_LIST(name) \ 66 + __BTF_ID_LIST(name) \ 67 + extern u32 name[]; 68 + 69 + /* 70 + * The BTF_ID_UNUSED macro defines 4 zero bytes. 
71 + * It's used when we want to define 'unused' entry 72 + * in BTF_ID_LIST, like: 73 + * 74 + * BTF_ID_LIST(bpf_skb_output_btf_ids) 75 + * BTF_ID(struct, sk_buff) 76 + * BTF_ID_UNUSED 77 + * BTF_ID(struct, task_struct) 78 + */ 79 + 80 + #define BTF_ID_UNUSED \ 81 + asm( \ 82 + ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ 83 + ".zero 4 \n" \ 84 + ".popsection; \n"); 85 + 86 + 87 + #endif
+4
tools/include/linux/compiler.h
··· 201 201 # define __fallthrough 202 202 #endif 203 203 204 + /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ 205 + #define ___PASTE(a, b) a##b 206 + #define __PASTE(a, b) ___PASTE(a, b) 207 + 204 208 #endif /* _TOOLS_LINUX_COMPILER_H */
+1
tools/include/uapi/linux/bpf.h
··· 226 226 BPF_CGROUP_INET4_GETSOCKNAME, 227 227 BPF_CGROUP_INET6_GETSOCKNAME, 228 228 BPF_XDP_DEVMAP, 229 + BPF_CGROUP_INET_SOCK_RELEASE, 229 230 __MAX_BPF_ATTACH_TYPE 230 231 }; 231 232
+4 -1
tools/include/uapi/linux/if_xdp.h
··· 73 73 }; 74 74 75 75 struct xdp_statistics { 76 - __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ 76 + __u64 rx_dropped; /* Dropped for other reasons */ 77 77 __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ 78 78 __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ 79 + __u64 rx_ring_full; /* Dropped due to rx ring being full */ 80 + __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ 81 + __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ 79 82 }; 80 83 81 84 struct xdp_options {
+7 -2
tools/lib/bpf/btf.c
··· 389 389 if (!btf) 390 390 return; 391 391 392 - if (btf->fd != -1) 392 + if (btf->fd >= 0) 393 393 close(btf->fd); 394 394 395 395 free(btf->data); ··· 397 397 free(btf); 398 398 } 399 399 400 - struct btf *btf__new(__u8 *data, __u32 size) 400 + struct btf *btf__new(const void *data, __u32 size) 401 401 { 402 402 struct btf *btf; 403 403 int err; ··· 698 698 int btf__fd(const struct btf *btf) 699 699 { 700 700 return btf->fd; 701 + } 702 + 703 + void btf__set_fd(struct btf *btf, int fd) 704 + { 705 + btf->fd = fd; 701 706 } 702 707 703 708 const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
+5 -2
tools/lib/bpf/btf.h
··· 63 63 }; 64 64 65 65 LIBBPF_API void btf__free(struct btf *btf); 66 - LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size); 66 + LIBBPF_API struct btf *btf__new(const void *data, __u32 size); 67 67 LIBBPF_API struct btf *btf__parse_elf(const char *path, 68 68 struct btf_ext **btf_ext); 69 69 LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); ··· 79 79 LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); 80 80 LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); 81 81 LIBBPF_API int btf__fd(const struct btf *btf); 82 + LIBBPF_API void btf__set_fd(struct btf *btf, int fd); 82 83 LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); 83 84 LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); 84 85 LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); ··· 144 143 * necessary indentation already 145 144 */ 146 145 int indent_level; 146 + /* strip all the const/volatile/restrict mods */ 147 + bool strip_mods; 147 148 }; 148 - #define btf_dump_emit_type_decl_opts__last_field indent_level 149 + #define btf_dump_emit_type_decl_opts__last_field strip_mods 149 150 150 151 LIBBPF_API int 151 152 btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
+8 -2
tools/lib/bpf/btf_dump.c
··· 60 60 const struct btf_ext *btf_ext; 61 61 btf_dump_printf_fn_t printf_fn; 62 62 struct btf_dump_opts opts; 63 + bool strip_mods; 63 64 64 65 /* per-type auxiliary state */ 65 66 struct btf_dump_type_aux_state *type_states; ··· 1033 1032 1034 1033 fname = OPTS_GET(opts, field_name, ""); 1035 1034 lvl = OPTS_GET(opts, indent_level, 0); 1035 + d->strip_mods = OPTS_GET(opts, strip_mods, false); 1036 1036 btf_dump_emit_type_decl(d, id, fname, lvl); 1037 + d->strip_mods = false; 1037 1038 return 0; 1038 1039 } 1039 1040 ··· 1048 1045 1049 1046 stack_start = d->decl_stack_cnt; 1050 1047 for (;;) { 1048 + t = btf__type_by_id(d->btf, id); 1049 + if (d->strip_mods && btf_is_mod(t)) 1050 + goto skip_mod; 1051 + 1051 1052 err = btf_dump_push_decl_stack_id(d, id); 1052 1053 if (err < 0) { 1053 1054 /* ··· 1063 1056 d->decl_stack_cnt = stack_start; 1064 1057 return; 1065 1058 } 1066 - 1059 + skip_mod: 1067 1060 /* VOID */ 1068 1061 if (id == 0) 1069 1062 break; 1070 1063 1071 - t = btf__type_by_id(d->btf, id); 1072 1064 switch (btf_kind(t)) { 1073 1065 case BTF_KIND_PTR: 1074 1066 case BTF_KIND_VOLATILE:
+81 -68
tools/lib/bpf/libbpf.c
··· 2338 2338 return false; 2339 2339 } 2340 2340 2341 - static void bpf_object__sanitize_btf(struct bpf_object *obj) 2341 + static bool btf_needs_sanitization(struct bpf_object *obj) 2342 2342 { 2343 2343 bool has_func_global = obj->caps.btf_func_global; 2344 2344 bool has_datasec = obj->caps.btf_datasec; 2345 2345 bool has_func = obj->caps.btf_func; 2346 - struct btf *btf = obj->btf; 2346 + 2347 + return !has_func || !has_datasec || !has_func_global; 2348 + } 2349 + 2350 + static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) 2351 + { 2352 + bool has_func_global = obj->caps.btf_func_global; 2353 + bool has_datasec = obj->caps.btf_datasec; 2354 + bool has_func = obj->caps.btf_func; 2347 2355 struct btf_type *t; 2348 2356 int i, j, vlen; 2349 - 2350 - if (!obj->btf || (has_func && has_datasec && has_func_global)) 2351 - return; 2352 2357 2353 2358 for (i = 1; i <= btf__get_nr_types(btf); i++) { 2354 2359 t = (struct btf_type *)btf__type_by_id(btf, i); ··· 2404 2399 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */ 2405 2400 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); 2406 2401 } 2407 - } 2408 - } 2409 - 2410 - static void bpf_object__sanitize_btf_ext(struct bpf_object *obj) 2411 - { 2412 - if (!obj->btf_ext) 2413 - return; 2414 - 2415 - if (!obj->caps.btf_func) { 2416 - btf_ext__free(obj->btf_ext); 2417 - obj->btf_ext = NULL; 2418 2402 } 2419 2403 } 2420 2404 ··· 2467 2473 return 0; 2468 2474 2469 2475 err = btf__finalize_data(obj, obj->btf); 2470 - if (!err) 2471 - return 0; 2472 - 2473 - pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); 2474 - btf__free(obj->btf); 2475 - obj->btf = NULL; 2476 - btf_ext__free(obj->btf_ext); 2477 - obj->btf_ext = NULL; 2478 - 2479 - if (libbpf_needs_btf(obj)) { 2480 - pr_warn("BTF is required, but is missing or corrupted.\n"); 2481 - return -ENOENT; 2476 + if (err) { 2477 + pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); 2478 + return err; 2482 2479 } 2480 + 2483 2481 return 0; 2484 2482 } 
2485 2483 ··· 2524 2538 2525 2539 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) 2526 2540 { 2541 + struct btf *kern_btf = obj->btf; 2542 + bool btf_mandatory, sanitize; 2527 2543 int err = 0; 2528 2544 2529 2545 if (!obj->btf) 2530 2546 return 0; 2531 2547 2532 - bpf_object__sanitize_btf(obj); 2533 - bpf_object__sanitize_btf_ext(obj); 2548 + sanitize = btf_needs_sanitization(obj); 2549 + if (sanitize) { 2550 + const void *raw_data; 2551 + __u32 sz; 2534 2552 2535 - err = btf__load(obj->btf); 2536 - if (err) { 2537 - pr_warn("Error loading %s into kernel: %d.\n", 2538 - BTF_ELF_SEC, err); 2539 - btf__free(obj->btf); 2540 - obj->btf = NULL; 2541 - /* btf_ext can't exist without btf, so free it as well */ 2542 - if (obj->btf_ext) { 2543 - btf_ext__free(obj->btf_ext); 2544 - obj->btf_ext = NULL; 2545 - } 2553 + /* clone BTF to sanitize a copy and leave the original intact */ 2554 + raw_data = btf__get_raw_data(obj->btf, &sz); 2555 + kern_btf = btf__new(raw_data, sz); 2556 + if (IS_ERR(kern_btf)) 2557 + return PTR_ERR(kern_btf); 2546 2558 2547 - if (kernel_needs_btf(obj)) 2548 - return err; 2559 + bpf_object__sanitize_btf(obj, kern_btf); 2549 2560 } 2550 - return 0; 2561 + 2562 + err = btf__load(kern_btf); 2563 + if (sanitize) { 2564 + if (!err) { 2565 + /* move fd to libbpf's BTF */ 2566 + btf__set_fd(obj->btf, btf__fd(kern_btf)); 2567 + btf__set_fd(kern_btf, -1); 2568 + } 2569 + btf__free(kern_btf); 2570 + } 2571 + if (err) { 2572 + btf_mandatory = kernel_needs_btf(obj); 2573 + pr_warn("Error loading .BTF into kernel: %d. %s\n", err, 2574 + btf_mandatory ? "BTF is mandatory, can't proceed." 
2575 + : "BTF is optional, ignoring."); 2576 + if (!btf_mandatory) 2577 + err = 0; 2578 + } 2579 + return err; 2551 2580 } 2552 2581 2553 2582 static int bpf_object__elf_collect(struct bpf_object *obj) ··· 3786 3785 create_attr.btf_fd = 0; 3787 3786 create_attr.btf_key_type_id = 0; 3788 3787 create_attr.btf_value_type_id = 0; 3789 - if (obj->btf && !bpf_map_find_btf_info(obj, map)) { 3788 + if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { 3790 3789 create_attr.btf_fd = btf__fd(obj->btf); 3791 3790 create_attr.btf_key_type_id = map->btf_key_type_id; 3792 3791 create_attr.btf_value_type_id = map->btf_value_type_id; ··· 5376 5375 load_attr.kern_version = kern_version; 5377 5376 load_attr.prog_ifindex = prog->prog_ifindex; 5378 5377 } 5379 - /* if .BTF.ext was loaded, kernel supports associated BTF for prog */ 5380 - if (prog->obj->btf_ext) 5381 - btf_fd = bpf_object__btf_fd(prog->obj); 5382 - else 5383 - btf_fd = -1; 5384 - load_attr.prog_btf_fd = btf_fd >= 0 ? 
btf_fd : 0; 5385 - load_attr.func_info = prog->func_info; 5386 - load_attr.func_info_rec_size = prog->func_info_rec_size; 5387 - load_attr.func_info_cnt = prog->func_info_cnt; 5388 - load_attr.line_info = prog->line_info; 5389 - load_attr.line_info_rec_size = prog->line_info_rec_size; 5390 - load_attr.line_info_cnt = prog->line_info_cnt; 5378 + /* specify func_info/line_info only if kernel supports them */ 5379 + btf_fd = bpf_object__btf_fd(prog->obj); 5380 + if (btf_fd >= 0 && prog->obj->caps.btf_func) { 5381 + load_attr.prog_btf_fd = btf_fd; 5382 + load_attr.func_info = prog->func_info; 5383 + load_attr.func_info_rec_size = prog->func_info_rec_size; 5384 + load_attr.func_info_cnt = prog->func_info_cnt; 5385 + load_attr.line_info = prog->line_info; 5386 + load_attr.line_info_rec_size = prog->line_info_rec_size; 5387 + load_attr.line_info_cnt = prog->line_info_cnt; 5388 + } 5391 5389 load_attr.log_level = prog->log_level; 5392 5390 load_attr.prog_flags = prog->prog_flags; 5393 5391 ··· 6923 6923 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, 6924 6924 BPF_CGROUP_INET_EGRESS), 6925 6925 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), 6926 + BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK, 6927 + BPF_CGROUP_INET_SOCK_CREATE), 6928 + BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK, 6929 + BPF_CGROUP_INET_SOCK_RELEASE), 6926 6930 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, 6927 6931 BPF_CGROUP_INET_SOCK_CREATE), 6928 6932 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, ··· 8592 8588 struct perf_buffer_params *p) 8593 8589 { 8594 8590 const char *online_cpus_file = "/sys/devices/system/cpu/online"; 8595 - struct bpf_map_info map = {}; 8591 + struct bpf_map_info map; 8596 8592 char msg[STRERR_BUFSIZE]; 8597 8593 struct perf_buffer *pb; 8598 8594 bool *online = NULL; ··· 8605 8601 return ERR_PTR(-EINVAL); 8606 8602 } 8607 8603 8604 + /* best-effort sanity checks */ 8605 + memset(&map, 0, 
sizeof(map)); 8608 8606 map_info_len = sizeof(map); 8609 8607 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); 8610 8608 if (err) { 8611 8609 err = -errno; 8612 - pr_warn("failed to get map info for map FD %d: %s\n", 8613 - map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); 8614 - return ERR_PTR(err); 8615 - } 8616 - 8617 - if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { 8618 - pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", 8619 - map.name); 8620 - return ERR_PTR(-EINVAL); 8610 + /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return 8611 + * -EBADFD, -EFAULT, or -E2BIG on real error 8612 + */ 8613 + if (err != -EINVAL) { 8614 + pr_warn("failed to get map info for map FD %d: %s\n", 8615 + map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); 8616 + return ERR_PTR(err); 8617 + } 8618 + pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", 8619 + map_fd); 8620 + } else { 8621 + if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { 8622 + pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", 8623 + map.name); 8624 + return ERR_PTR(-EINVAL); 8625 + } 8621 8626 } 8622 8627 8623 8628 pb = calloc(1, sizeof(*pb)); ··· 8658 8645 err = pb->cpu_cnt; 8659 8646 goto error; 8660 8647 } 8661 - if (map.max_entries < pb->cpu_cnt) 8648 + if (map.max_entries && map.max_entries < pb->cpu_cnt) 8662 8649 pb->cpu_cnt = map.max_entries; 8663 8650 } 8664 8651
+1
tools/lib/bpf/libbpf.map
··· 288 288 bpf_map__value_size; 289 289 bpf_program__autoload; 290 290 bpf_program__set_autoload; 291 + btf__set_fd; 291 292 } LIBBPF_0.0.9;
+14 -1
tools/testing/selftests/bpf/Makefile
··· 111 111 BUILD_DIR := $(SCRATCH_DIR)/build 112 112 INCLUDE_DIR := $(SCRATCH_DIR)/include 113 113 BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a 114 + RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids 114 115 115 116 # Define simple and short `make test_progs`, `make test_sysctl`, etc targets 116 117 # to build individual tests. ··· 178 177 $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ 179 178 DESTDIR=$(SCRATCH_DIR) prefix= all install_headers 180 179 181 - $(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR): 180 + $(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR): 182 181 $(call msg,MKDIR,,$@) 183 182 mkdir -p $@ 184 183 ··· 190 189 $(call msg,CP,,$@) 191 190 cp "$(VMLINUX_H)" $@ 192 191 endif 192 + 193 + $(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \ 194 + $(TOOLSDIR)/bpf/resolve_btfids/main.c \ 195 + $(TOOLSDIR)/lib/rbtree.c \ 196 + $(TOOLSDIR)/lib/zalloc.c \ 197 + $(TOOLSDIR)/lib/string.c \ 198 + $(TOOLSDIR)/lib/ctype.c \ 199 + $(TOOLSDIR)/lib/str_error_r.c 200 + $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \ 201 + OUTPUT=$(BUILD_DIR)/resolve_btfids/ BPFOBJ=$(BPFOBJ) 193 202 194 203 # Get Clang's default includes on this system, as opposed to those seen by 195 204 # '-target bpf'. This fixes "missing" files on some architectures/distros, ··· 363 352 364 353 $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ 365 354 $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \ 355 + $(RESOLVE_BTFIDS) \ 366 356 | $(TRUNNER_BINARY)-extras 367 357 $$(call msg,BINARY,,$$@) 368 358 $$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ 359 + $(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@ 369 360 370 361 endef 371 362
-14
tools/testing/selftests/bpf/bpf_legacy.h
··· 2 2 #ifndef __BPF_LEGACY__ 3 3 #define __BPF_LEGACY__ 4 4 5 - /* 6 - * legacy bpf_map_def with extra fields supported only by bpf_load(), do not 7 - * use outside of samples/bpf 8 - */ 9 - struct bpf_map_def_legacy { 10 - unsigned int type; 11 - unsigned int key_size; 12 - unsigned int value_size; 13 - unsigned int max_entries; 14 - unsigned int map_flags; 15 - unsigned int inner_map_idx; 16 - unsigned int numa_node; 17 - }; 18 - 19 5 #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \ 20 6 struct ____btf_map_##name { \ 21 7 type_key key; \
+33
tools/testing/selftests/bpf/prog_tests/core_retro.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) 2020 Facebook 3 + #define _GNU_SOURCE 4 + #include <test_progs.h> 5 + #include "test_core_retro.skel.h" 6 + 7 + void test_core_retro(void) 8 + { 9 + int err, zero = 0, res, duration = 0; 10 + struct test_core_retro *skel; 11 + 12 + /* load program */ 13 + skel = test_core_retro__open_and_load(); 14 + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) 15 + goto out_close; 16 + 17 + /* attach probe */ 18 + err = test_core_retro__attach(skel); 19 + if (CHECK(err, "attach_kprobe", "err %d\n", err)) 20 + goto out_close; 21 + 22 + /* trigger */ 23 + usleep(1); 24 + 25 + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res); 26 + if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno)) 27 + goto out_close; 28 + 29 + CHECK(res != getpid(), "pid_check", "got %d != exp %d\n", res, getpid()); 30 + 31 + out_close: 32 + test_core_retro__destroy(skel); 33 + }
+12 -30
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
··· 4 4 #include <sched.h> 5 5 #include <sys/socket.h> 6 6 #include <test_progs.h> 7 + #include "test_perf_buffer.skel.h" 7 8 #include "bpf/libbpf_internal.h" 8 9 9 10 /* AddressSanitizer sometimes crashes due to data dereference below, due to ··· 26 25 27 26 void test_perf_buffer(void) 28 27 { 29 - int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; 30 - const char *prog_name = "kprobe/sys_nanosleep"; 31 - const char *file = "./test_perf_buffer.o"; 28 + int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; 32 29 struct perf_buffer_opts pb_opts = {}; 33 - struct bpf_map *perf_buf_map; 30 + struct test_perf_buffer *skel; 34 31 cpu_set_t cpu_set, cpu_seen; 35 - struct bpf_program *prog; 36 - struct bpf_object *obj; 37 32 struct perf_buffer *pb; 38 - struct bpf_link *link; 39 33 bool *online; 40 34 41 35 nr_cpus = libbpf_num_possible_cpus(); ··· 47 51 nr_on_cpus++; 48 52 49 53 /* load program */ 50 - err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd); 51 - if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) { 52 - obj = NULL; 53 - goto out_close; 54 - } 55 - 56 - prog = bpf_object__find_program_by_title(obj, prog_name); 57 - if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) 54 + skel = test_perf_buffer__open_and_load(); 55 + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) 58 56 goto out_close; 59 57 60 - /* load map */ 61 - perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map"); 62 - if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) 63 - goto out_close; 64 - 65 - /* attach kprobe */ 66 - link = bpf_program__attach_kprobe(prog, false /* retprobe */, 67 - SYS_NANOSLEEP_KPROBE_NAME); 68 - if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link))) 58 + /* attach probe */ 59 + err = test_perf_buffer__attach(skel); 60 + if (CHECK(err, "attach_kprobe", "err %d\n", err)) 69 61 goto out_close; 70 62 71 63 /* set up perf buffer */ 72 64 pb_opts.sample_cb = on_sample; 73 
65 pb_opts.ctx = &cpu_seen; 74 - pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts); 66 + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts); 75 67 if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) 76 - goto out_detach; 68 + goto out_close; 77 69 78 70 /* trigger kprobe on every CPU */ 79 71 CPU_ZERO(&cpu_seen); ··· 78 94 &cpu_set); 79 95 if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", 80 96 i, err)) 81 - goto out_detach; 97 + goto out_close; 82 98 83 99 usleep(1); 84 100 } ··· 94 110 95 111 out_free_pb: 96 112 perf_buffer__free(pb); 97 - out_detach: 98 - bpf_link__destroy(link); 99 113 out_close: 100 - bpf_object__close(obj); 114 + test_perf_buffer__destroy(skel); 101 115 free(online); 102 116 }
+111
tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/err.h> 4 + #include <string.h> 5 + #include <bpf/btf.h> 6 + #include <bpf/libbpf.h> 7 + #include <linux/btf.h> 8 + #include <linux/kernel.h> 9 + #include <linux/btf_ids.h> 10 + #include "test_progs.h" 11 + 12 + static int duration; 13 + 14 + struct symbol { 15 + const char *name; 16 + int type; 17 + int id; 18 + }; 19 + 20 + struct symbol test_symbols[] = { 21 + { "unused", BTF_KIND_UNKN, 0 }, 22 + { "S", BTF_KIND_TYPEDEF, -1 }, 23 + { "T", BTF_KIND_TYPEDEF, -1 }, 24 + { "U", BTF_KIND_TYPEDEF, -1 }, 25 + { "S", BTF_KIND_STRUCT, -1 }, 26 + { "U", BTF_KIND_UNION, -1 }, 27 + { "func", BTF_KIND_FUNC, -1 }, 28 + }; 29 + 30 + BTF_ID_LIST(test_list) 31 + BTF_ID_UNUSED 32 + BTF_ID(typedef, S) 33 + BTF_ID(typedef, T) 34 + BTF_ID(typedef, U) 35 + BTF_ID(struct, S) 36 + BTF_ID(union, U) 37 + BTF_ID(func, func) 38 + 39 + static int 40 + __resolve_symbol(struct btf *btf, int type_id) 41 + { 42 + const struct btf_type *type; 43 + const char *str; 44 + unsigned int i; 45 + 46 + type = btf__type_by_id(btf, type_id); 47 + if (!type) { 48 + PRINT_FAIL("Failed to get type for ID %d\n", type_id); 49 + return -1; 50 + } 51 + 52 + for (i = 0; i < ARRAY_SIZE(test_symbols); i++) { 53 + if (test_symbols[i].id != -1) 54 + continue; 55 + 56 + if (BTF_INFO_KIND(type->info) != test_symbols[i].type) 57 + continue; 58 + 59 + str = btf__name_by_offset(btf, type->name_off); 60 + if (!str) { 61 + PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id); 62 + return -1; 63 + } 64 + 65 + if (!strcmp(str, test_symbols[i].name)) 66 + test_symbols[i].id = type_id; 67 + } 68 + 69 + return 0; 70 + } 71 + 72 + static int resolve_symbols(void) 73 + { 74 + struct btf *btf; 75 + int type_id; 76 + __u32 nr; 77 + 78 + btf = btf__parse_elf("btf_data.o", NULL); 79 + if (CHECK(libbpf_get_error(btf), "resolve", 80 + "Failed to load BTF from btf_data.o\n")) 81 + return -1; 82 + 83 + nr = btf__get_nr_types(btf); 84 + 85 + for (type_id = 1; type_id 
<= nr; type_id++) { 86 + if (__resolve_symbol(btf, type_id)) 87 + break; 88 + } 89 + 90 + btf__free(btf); 91 + return 0; 92 + } 93 + 94 + int test_resolve_btfids(void) 95 + { 96 + unsigned int i; 97 + int ret = 0; 98 + 99 + if (resolve_symbols()) 100 + return -1; 101 + 102 + /* Check BTF_ID_LIST(test_list) IDs */ 103 + for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) { 104 + ret = CHECK(test_list[i] != test_symbols[i].id, 105 + "id_check", 106 + "wrong ID for %s (%d != %d)\n", test_symbols[i].name, 107 + test_list[i], test_symbols[i].id); 108 + } 109 + 110 + return ret; 111 + }
+3 -3
tools/testing/selftests/bpf/prog_tests/skeleton.c
··· 41 41 CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL); 42 42 CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL); 43 43 44 - CHECK(rodata->in6 != 0, "in6", "got %d != exp %d\n", rodata->in6, 0); 44 + CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0); 45 45 CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0); 46 46 47 47 /* validate we can pre-setup global variables, even in .bss */ ··· 49 49 data->in2 = 11; 50 50 bss->in3 = 12; 51 51 bss->in4 = 13; 52 - rodata->in6 = 14; 52 + rodata->in.in6 = 14; 53 53 54 54 err = test_skeleton__load(skel); 55 55 if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) ··· 60 60 CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL); 61 61 CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12); 62 62 CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL); 63 - CHECK(rodata->in6 != 14, "in6", "got %d != exp %d\n", rodata->in6, 14); 63 + CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14); 64 64 65 65 /* now set new values and attach to get them into outX variables */ 66 66 data->in1 = 1;
+75
tools/testing/selftests/bpf/prog_tests/trace_printk.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2020, Oracle and/or its affiliates. */ 3 + 4 + #include <test_progs.h> 5 + 6 + #include "trace_printk.skel.h" 7 + 8 + #define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" 9 + #define SEARCHMSG "testing,testing" 10 + 11 + void test_trace_printk(void) 12 + { 13 + int err, iter = 0, duration = 0, found = 0; 14 + struct trace_printk__bss *bss; 15 + struct trace_printk *skel; 16 + char *buf = NULL; 17 + FILE *fp = NULL; 18 + size_t buflen; 19 + 20 + skel = trace_printk__open(); 21 + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) 22 + return; 23 + 24 + err = trace_printk__load(skel); 25 + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) 26 + goto cleanup; 27 + 28 + bss = skel->bss; 29 + 30 + err = trace_printk__attach(skel); 31 + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) 32 + goto cleanup; 33 + 34 + fp = fopen(TRACEBUF, "r"); 35 + if (CHECK(fp == NULL, "could not open trace buffer", 36 + "error %d opening %s", errno, TRACEBUF)) 37 + goto cleanup; 38 + 39 + /* We do not want to wait forever if this test fails... 
*/ 40 + fcntl(fileno(fp), F_SETFL, O_NONBLOCK); 41 + 42 + /* wait for tracepoint to trigger */ 43 + usleep(1); 44 + trace_printk__detach(skel); 45 + 46 + if (CHECK(bss->trace_printk_ran == 0, 47 + "bpf_trace_printk never ran", 48 + "ran == %d", bss->trace_printk_ran)) 49 + goto cleanup; 50 + 51 + if (CHECK(bss->trace_printk_ret <= 0, 52 + "bpf_trace_printk returned <= 0 value", 53 + "got %d", bss->trace_printk_ret)) 54 + goto cleanup; 55 + 56 + /* verify our search string is in the trace buffer */ 57 + while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) { 58 + if (strstr(buf, SEARCHMSG) != NULL) 59 + found++; 60 + if (found == bss->trace_printk_ran) 61 + break; 62 + if (++iter > 1000) 63 + break; 64 + } 65 + 66 + if (CHECK(!found, "message from bpf_trace_printk not found", 67 + "no instance of %s in %s", SEARCHMSG, TRACEBUF)) 68 + goto cleanup; 69 + 70 + cleanup: 71 + trace_printk__destroy(skel); 72 + free(buf); 73 + if (fp) 74 + fclose(fp); 75 + }
+75
tools/testing/selftests/bpf/prog_tests/udp_limit.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <test_progs.h> 3 + #include "udp_limit.skel.h" 4 + 5 + #include <sys/types.h> 6 + #include <sys/socket.h> 7 + 8 + static int duration; 9 + 10 + void test_udp_limit(void) 11 + { 12 + struct udp_limit *skel; 13 + int fd1 = -1, fd2 = -1; 14 + int cgroup_fd; 15 + 16 + cgroup_fd = test__join_cgroup("/udp_limit"); 17 + if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno)) 18 + return; 19 + 20 + skel = udp_limit__open_and_load(); 21 + if (CHECK(!skel, "skel-load", "errno %d", errno)) 22 + goto close_cgroup_fd; 23 + 24 + skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd); 25 + skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd); 26 + if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release), 27 + "cg-attach", "sock %ld sock_release %ld", 28 + PTR_ERR(skel->links.sock), 29 + PTR_ERR(skel->links.sock_release))) 30 + goto close_skeleton; 31 + 32 + /* BPF program enforces a single UDP socket per cgroup, 33 + * verify that. 34 + */ 35 + fd1 = socket(AF_INET, SOCK_DGRAM, 0); 36 + if (CHECK(fd1 < 0, "fd1", "errno %d", errno)) 37 + goto close_skeleton; 38 + 39 + fd2 = socket(AF_INET, SOCK_DGRAM, 0); 40 + if (CHECK(fd2 >= 0, "fd2", "errno %d", errno)) 41 + goto close_skeleton; 42 + 43 + /* We can reopen again after close. 
*/ 44 + close(fd1); 45 + fd1 = -1; 46 + 47 + fd1 = socket(AF_INET, SOCK_DGRAM, 0); 48 + if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno)) 49 + goto close_skeleton; 50 + 51 + /* Make sure the program was invoked the expected 52 + * number of times: 53 + * - open fd1 - BPF_CGROUP_INET_SOCK_CREATE 54 + * - attempt to openfd2 - BPF_CGROUP_INET_SOCK_CREATE 55 + * - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE 56 + * - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE 57 + */ 58 + if (CHECK(skel->bss->invocations != 4, "bss-invocations", 59 + "invocations=%d", skel->bss->invocations)) 60 + goto close_skeleton; 61 + 62 + /* We should still have a single socket in use */ 63 + if (CHECK(skel->bss->in_use != 1, "bss-in_use", 64 + "in_use=%d", skel->bss->in_use)) 65 + goto close_skeleton; 66 + 67 + close_skeleton: 68 + if (fd1 >= 0) 69 + close(fd1); 70 + if (fd2 >= 0) 71 + close(fd2); 72 + udp_limit__destroy(skel); 73 + close_cgroup_fd: 74 + close(cgroup_fd); 75 + }
+50
tools/testing/selftests/bpf/progs/btf_data.c
··· 1 + // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 + 3 + struct S { 4 + int a; 5 + int b; 6 + int c; 7 + }; 8 + 9 + union U { 10 + int a; 11 + int b; 12 + int c; 13 + }; 14 + 15 + struct S1 { 16 + int a; 17 + int b; 18 + int c; 19 + }; 20 + 21 + union U1 { 22 + int a; 23 + int b; 24 + int c; 25 + }; 26 + 27 + typedef int T; 28 + typedef int S; 29 + typedef int U; 30 + typedef int T1; 31 + typedef int S1; 32 + typedef int U1; 33 + 34 + struct root_struct { 35 + S m_1; 36 + T m_2; 37 + U m_3; 38 + S1 m_4; 39 + T1 m_5; 40 + U1 m_6; 41 + struct S m_7; 42 + struct S1 m_8; 43 + union U m_9; 44 + union U1 m_10; 45 + }; 46 + 47 + int func(struct root_struct *root) 48 + { 49 + return 0; 50 + }
+30
tools/testing/selftests/bpf/progs/test_core_retro.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) 2020 Facebook 3 + #include <linux/bpf.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_core_read.h> 6 + 7 + struct task_struct { 8 + int tgid; 9 + } __attribute__((preserve_access_index)); 10 + 11 + struct { 12 + __uint(type, BPF_MAP_TYPE_ARRAY); 13 + __uint(max_entries, 1); 14 + __type(key, int); 15 + __type(value, int); 16 + } results SEC(".maps"); 17 + 18 + SEC("tp/raw_syscalls/sys_enter") 19 + int handle_sys_enter(void *ctx) 20 + { 21 + struct task_struct *task = (void *)bpf_get_current_task(); 22 + int tgid = BPF_CORE_READ(task, tgid); 23 + int zero = 0; 24 + 25 + bpf_map_update_elem(&results, &zero, &tgid, 0); 26 + 27 + return 0; 28 + } 29 + 30 + char _license[] SEC("license") = "GPL";
+2 -2
tools/testing/selftests/bpf/progs/test_perf_buffer.c
··· 12 12 __uint(value_size, sizeof(int)); 13 13 } perf_buf_map SEC(".maps"); 14 14 15 - SEC("kprobe/sys_nanosleep") 16 - int BPF_KPROBE(handle_sys_nanosleep_entry) 15 + SEC("tp/raw_syscalls/sys_enter") 16 + int handle_sys_enter(void *ctx) 17 17 { 18 18 int cpu = bpf_get_smp_processor_id(); 19 19
+4 -2
tools/testing/selftests/bpf/progs/test_skeleton.c
··· 20 20 struct s in5 = {}; 21 21 22 22 /* .rodata section */ 23 - const volatile int in6 = 0; 23 + const volatile struct { 24 + const int in6; 25 + } in = {}; 24 26 25 27 /* .data section */ 26 28 int out1 = -1; ··· 48 46 out3 = in3; 49 47 out4 = in4; 50 48 out5 = in5; 51 - out6 = in6; 49 + out6 = in.in6; 52 50 53 51 bpf_syscall = CONFIG_BPF_SYSCALL; 54 52 kern_ver = LINUX_KERNEL_VERSION;
+21
tools/testing/selftests/bpf/progs/trace_printk.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) 2020, Oracle and/or its affiliates. 3 + 4 + #include "vmlinux.h" 5 + #include <bpf/bpf_helpers.h> 6 + #include <bpf/bpf_tracing.h> 7 + 8 + char _license[] SEC("license") = "GPL"; 9 + 10 + int trace_printk_ret = 0; 11 + int trace_printk_ran = 0; 12 + 13 + SEC("tp/raw_syscalls/sys_enter") 14 + int sys_enter(void *ctx) 15 + { 16 + static const char fmt[] = "testing,testing %d\n"; 17 + 18 + trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt), 19 + ++trace_printk_ran); 20 + return 0; 21 + }
+42
tools/testing/selftests/bpf/progs/udp_limit.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <sys/socket.h> 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + 7 + int invocations = 0, in_use = 0; 8 + 9 + SEC("cgroup/sock_create") 10 + int sock(struct bpf_sock *ctx) 11 + { 12 + __u32 key; 13 + 14 + if (ctx->type != SOCK_DGRAM) 15 + return 1; 16 + 17 + __sync_fetch_and_add(&invocations, 1); 18 + 19 + if (in_use > 0) { 20 + /* BPF_CGROUP_INET_SOCK_RELEASE is _not_ called 21 + * when we return an error from the BPF 22 + * program! 23 + */ 24 + return 0; 25 + } 26 + 27 + __sync_fetch_and_add(&in_use, 1); 28 + return 1; 29 + } 30 + 31 + SEC("cgroup/sock_release") 32 + int sock_release(struct bpf_sock *ctx) 33 + { 34 + __u32 key; 35 + 36 + if (ctx->type != SOCK_DGRAM) 37 + return 1; 38 + 39 + __sync_fetch_and_add(&invocations, 1); 40 + __sync_fetch_and_add(&in_use, -1); 41 + return 1; 42 + }
+8 -5
tools/testing/selftests/bpf/test_progs.c
··· 12 12 #include <string.h> 13 13 #include <execinfo.h> /* backtrace */ 14 14 15 + #define EXIT_NO_TEST 2 16 + #define EXIT_ERR_SETUP_INFRA 3 17 + 15 18 /* defined in test_progs.h */ 16 19 struct test_env env = {}; 17 20 ··· 114 111 if (err < 0) { 115 112 stdio_restore(); 116 113 fprintf(stderr, "Failed to reset process affinity: %d!\n", err); 117 - exit(-1); 114 + exit(EXIT_ERR_SETUP_INFRA); 118 115 } 119 116 err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); 120 117 if (err < 0) { 121 118 stdio_restore(); 122 119 fprintf(stderr, "Failed to reset thread affinity: %d!\n", err); 123 - exit(-1); 120 + exit(EXIT_ERR_SETUP_INFRA); 124 121 } 125 122 } 126 123 ··· 129 126 env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY); 130 127 if (env.saved_netns_fd == -1) { 131 128 perror("open(/proc/self/ns/net)"); 132 - exit(-1); 129 + exit(EXIT_ERR_SETUP_INFRA); 133 130 } 134 131 } 135 132 ··· 138 135 if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) { 139 136 stdio_restore(); 140 137 perror("setns(CLONE_NEWNS)"); 141 - exit(-1); 138 + exit(EXIT_ERR_SETUP_INFRA); 142 139 } 143 140 } 144 141 ··· 743 740 close(env.saved_netns_fd); 744 741 745 742 if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) 746 - return EXIT_FAILURE; 743 + return EXIT_NO_TEST; 747 744 748 745 return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS; 749 746 }