Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-12-02

The following pull-request contains BPF updates for your *net* tree.

We've added 10 non-merge commits during the last 6 days, which contain
a total of 10 files changed, 60 insertions(+), 51 deletions(-).

The main changes are:

1) Fix vmlinux BTF generation for binutils pre v2.25, from Stanislav Fomichev.

2) Fix libbpf global variable relocation to take symbol's st_value offset
into account, from Andrii Nakryiko.

3) Fix libbpf build on powerpc where check_abi target fails due to different
readelf output format, from Aurelien Jarno.

4) Don't set BPF insns RO for the case when they are JITed in order to avoid
fragmenting the direct map, from Daniel Borkmann.

5) Fix static checker warning in btf_distill_func_proto() as well as a build
error due to empty enum when BPF is compiled out, from Alexei Starovoitov.

6) Fix up generation of bpf_helper_defs.h for perf, from Arnaldo Carvalho de Melo.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+60 -51
+6 -2
include/linux/filter.h
··· 776 776 777 777 static inline void bpf_prog_lock_ro(struct bpf_prog *fp) 778 778 { 779 - set_vm_flush_reset_perms(fp); 780 - set_memory_ro((unsigned long)fp, fp->pages); 779 + #ifndef CONFIG_BPF_JIT_ALWAYS_ON 780 + if (!fp->jited) { 781 + set_vm_flush_reset_perms(fp); 782 + set_memory_ro((unsigned long)fp, fp->pages); 783 + } 784 + #endif 781 785 } 782 786 783 787 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+4 -1
kernel/bpf/btf.c
··· 3463 3463 __ctx_convert##_id, 3464 3464 #include <linux/bpf_types.h> 3465 3465 #undef BPF_PROG_TYPE 3466 + __ctx_convert_unused, /* to avoid empty enum in extreme .config */ 3466 3467 }; 3467 3468 static u8 bpf_ctx_convert_map[] = { 3468 3469 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ ··· 3977 3976 t = btf_type_by_id(btf, btf_id); 3978 3977 while (t && btf_type_is_modifier(t)) 3979 3978 t = btf_type_by_id(btf, t->type); 3980 - if (!t) 3979 + if (!t) { 3980 + *bad_type = btf->types[0]; 3981 3981 return -EINVAL; 3982 + } 3982 3983 if (btf_type_is_ptr(t)) 3983 3984 /* kernel size of pointer. Not BPF's size of pointer*/ 3984 3985 return sizeof(void *);
+5 -5
tools/lib/bpf/Makefile
··· 147 147 148 148 GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \ 149 149 cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ 150 - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \ 150 + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \ 151 151 sort -u | wc -l) 152 152 VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ 153 153 grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) ··· 180 180 $(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h 181 181 $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR) 182 182 183 - bpf_helper_defs.h: $(srctree)/include/uapi/linux/bpf.h 183 + bpf_helper_defs.h: $(srctree)/tools/include/uapi/linux/bpf.h 184 184 $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \ 185 - --file $(srctree)/include/uapi/linux/bpf.h > bpf_helper_defs.h 185 + --file $(srctree)/tools/include/uapi/linux/bpf.h > bpf_helper_defs.h 186 186 187 187 $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) 188 188 ··· 214 214 "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \ 215 215 "Please make sure all LIBBPF_API symbols are" \ 216 216 "versioned in $(VERSION_SCRIPT)." >&2; \ 217 - readelf -s --wide $(OUTPUT)libbpf-in.o | \ 217 + readelf -s --wide $(BPF_IN_SHARED) | \ 218 218 cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ 219 - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \ 219 + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \ 220 220 sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ 221 221 readelf -s --wide $(OUTPUT)libbpf.so | \ 222 222 grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
+20 -25
tools/lib/bpf/libbpf.c
··· 171 171 RELO_DATA, 172 172 } type; 173 173 int insn_idx; 174 - union { 175 - int map_idx; 176 - int text_off; 177 - }; 174 + int map_idx; 175 + int sym_off; 178 176 } *reloc_desc; 179 177 int nr_reloc; 180 178 int log_level; ··· 1817 1819 return -LIBBPF_ERRNO__RELOC; 1818 1820 } 1819 1821 if (sym->st_value % 8) { 1820 - pr_warn("bad call relo offset: %lu\n", sym->st_value); 1822 + pr_warn("bad call relo offset: %llu\n", (__u64)sym->st_value); 1821 1823 return -LIBBPF_ERRNO__RELOC; 1822 1824 } 1823 1825 reloc_desc->type = RELO_CALL; 1824 1826 reloc_desc->insn_idx = insn_idx; 1825 - reloc_desc->text_off = sym->st_value / 8; 1827 + reloc_desc->sym_off = sym->st_value; 1826 1828 obj->has_pseudo_calls = true; 1827 1829 return 0; 1828 1830 } ··· 1866 1868 reloc_desc->type = RELO_LD64; 1867 1869 reloc_desc->insn_idx = insn_idx; 1868 1870 reloc_desc->map_idx = map_idx; 1871 + reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ 1869 1872 return 0; 1870 1873 } 1871 1874 ··· 1898 1899 reloc_desc->type = RELO_DATA; 1899 1900 reloc_desc->insn_idx = insn_idx; 1900 1901 reloc_desc->map_idx = map_idx; 1902 + reloc_desc->sym_off = sym->st_value; 1901 1903 return 0; 1902 1904 } 1903 1905 ··· 3563 3563 return -LIBBPF_ERRNO__RELOC; 3564 3564 3565 3565 if (prog->idx == obj->efile.text_shndx) { 3566 - pr_warn("relo in .text insn %d into off %d\n", 3567 - relo->insn_idx, relo->text_off); 3566 + pr_warn("relo in .text insn %d into off %d (insn #%d)\n", 3567 + relo->insn_idx, relo->sym_off, relo->sym_off / 8); 3568 3568 return -LIBBPF_ERRNO__RELOC; 3569 3569 } 3570 3570 ··· 3599 3599 prog->section_name); 3600 3600 } 3601 3601 insn = &prog->insns[relo->insn_idx]; 3602 - insn->imm += relo->text_off + prog->main_prog_cnt - relo->insn_idx; 3602 + insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx; 3603 3603 return 0; 3604 3604 } 3605 3605 ··· 3622 3622 return 0; 3623 3623 3624 3624 for (i = 0; i < prog->nr_reloc; i++) { 3625 - if (prog->reloc_desc[i].type == 
RELO_LD64 || 3626 - prog->reloc_desc[i].type == RELO_DATA) { 3627 - bool relo_data = prog->reloc_desc[i].type == RELO_DATA; 3628 - struct bpf_insn *insns = prog->insns; 3629 - int insn_idx, map_idx; 3625 + struct reloc_desc *relo = &prog->reloc_desc[i]; 3630 3626 3631 - insn_idx = prog->reloc_desc[i].insn_idx; 3632 - map_idx = prog->reloc_desc[i].map_idx; 3627 + if (relo->type == RELO_LD64 || relo->type == RELO_DATA) { 3628 + struct bpf_insn *insn = &prog->insns[relo->insn_idx]; 3633 3629 3634 - if (insn_idx + 1 >= (int)prog->insns_cnt) { 3630 + if (relo->insn_idx + 1 >= (int)prog->insns_cnt) { 3635 3631 pr_warn("relocation out of range: '%s'\n", 3636 3632 prog->section_name); 3637 3633 return -LIBBPF_ERRNO__RELOC; 3638 3634 } 3639 3635 3640 - if (!relo_data) { 3641 - insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; 3636 + if (relo->type != RELO_DATA) { 3637 + insn[0].src_reg = BPF_PSEUDO_MAP_FD; 3642 3638 } else { 3643 - insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE; 3644 - insns[insn_idx + 1].imm = insns[insn_idx].imm; 3639 + insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; 3640 + insn[1].imm = insn[0].imm + relo->sym_off; 3645 3641 } 3646 - insns[insn_idx].imm = obj->maps[map_idx].fd; 3647 - } else if (prog->reloc_desc[i].type == RELO_CALL) { 3648 - err = bpf_program__reloc_text(prog, obj, 3649 - &prog->reloc_desc[i]); 3642 + insn[0].imm = obj->maps[relo->map_idx].fd; 3643 + } else if (relo->type == RELO_CALL) { 3644 + err = bpf_program__reloc_text(prog, obj, relo); 3650 3645 if (err) 3651 3646 return err; 3652 3647 }
+1
tools/perf/MANIFEST
··· 19 19 tools/lib/str_error_r.c 20 20 tools/lib/vsprintf.c 21 21 tools/lib/zalloc.c 22 + scripts/bpf_helpers_doc.py
+6 -6
tools/testing/selftests/bpf/progs/fentry_test.c
··· 6 6 7 7 char _license[] SEC("license") = "GPL"; 8 8 9 - static volatile __u64 test1_result; 9 + __u64 test1_result = 0; 10 10 BPF_TRACE_1("fentry/bpf_fentry_test1", test1, int, a) 11 11 { 12 12 test1_result = a == 1; 13 13 return 0; 14 14 } 15 15 16 - static volatile __u64 test2_result; 16 + __u64 test2_result = 0; 17 17 BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b) 18 18 { 19 19 test2_result = a == 2 && b == 3; 20 20 return 0; 21 21 } 22 22 23 - static volatile __u64 test3_result; 23 + __u64 test3_result = 0; 24 24 BPF_TRACE_3("fentry/bpf_fentry_test3", test3, char, a, int, b, __u64, c) 25 25 { 26 26 test3_result = a == 4 && b == 5 && c == 6; 27 27 return 0; 28 28 } 29 29 30 - static volatile __u64 test4_result; 30 + __u64 test4_result = 0; 31 31 BPF_TRACE_4("fentry/bpf_fentry_test4", test4, 32 32 void *, a, char, b, int, c, __u64, d) 33 33 { ··· 35 35 return 0; 36 36 } 37 37 38 - static volatile __u64 test5_result; 38 + __u64 test5_result = 0; 39 39 BPF_TRACE_5("fentry/bpf_fentry_test5", test5, 40 40 __u64, a, void *, b, short, c, int, d, __u64, e) 41 41 { ··· 44 44 return 0; 45 45 } 46 46 47 - static volatile __u64 test6_result; 47 + __u64 test6_result = 0; 48 48 BPF_TRACE_6("fentry/bpf_fentry_test6", test6, 49 49 __u64, a, void *, b, short, c, int, d, void *, e, __u64, f) 50 50 {
+3 -3
tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
··· 8 8 unsigned int len; 9 9 }; 10 10 11 - static volatile __u64 test_result; 11 + __u64 test_result = 0; 12 12 BPF_TRACE_2("fexit/test_pkt_access", test_main, 13 13 struct sk_buff *, skb, int, ret) 14 14 { ··· 23 23 return 0; 24 24 } 25 25 26 - static volatile __u64 test_result_subprog1; 26 + __u64 test_result_subprog1 = 0; 27 27 BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1, 28 28 struct sk_buff *, skb, int, ret) 29 29 { ··· 56 56 __u64 args[5]; 57 57 __u64 ret; 58 58 }; 59 - static volatile __u64 test_result_subprog2; 59 + __u64 test_result_subprog2 = 0; 60 60 SEC("fexit/test_pkt_access_subprog2") 61 61 int test_subprog2(struct args_subprog2 *ctx) 62 62 {
+6 -6
tools/testing/selftests/bpf/progs/fexit_test.c
··· 6 6 7 7 char _license[] SEC("license") = "GPL"; 8 8 9 - static volatile __u64 test1_result; 9 + __u64 test1_result = 0; 10 10 BPF_TRACE_2("fexit/bpf_fentry_test1", test1, int, a, int, ret) 11 11 { 12 12 test1_result = a == 1 && ret == 2; 13 13 return 0; 14 14 } 15 15 16 - static volatile __u64 test2_result; 16 + __u64 test2_result = 0; 17 17 BPF_TRACE_3("fexit/bpf_fentry_test2", test2, int, a, __u64, b, int, ret) 18 18 { 19 19 test2_result = a == 2 && b == 3 && ret == 5; 20 20 return 0; 21 21 } 22 22 23 - static volatile __u64 test3_result; 23 + __u64 test3_result = 0; 24 24 BPF_TRACE_4("fexit/bpf_fentry_test3", test3, char, a, int, b, __u64, c, int, ret) 25 25 { 26 26 test3_result = a == 4 && b == 5 && c == 6 && ret == 15; 27 27 return 0; 28 28 } 29 29 30 - static volatile __u64 test4_result; 30 + __u64 test4_result = 0; 31 31 BPF_TRACE_5("fexit/bpf_fentry_test4", test4, 32 32 void *, a, char, b, int, c, __u64, d, int, ret) 33 33 { ··· 37 37 return 0; 38 38 } 39 39 40 - static volatile __u64 test5_result; 40 + __u64 test5_result = 0; 41 41 BPF_TRACE_6("fexit/bpf_fentry_test5", test5, 42 42 __u64, a, void *, b, short, c, int, d, __u64, e, int, ret) 43 43 { ··· 46 46 return 0; 47 47 } 48 48 49 - static volatile __u64 test6_result; 49 + __u64 test6_result = 0; 50 50 BPF_TRACE_7("fexit/bpf_fentry_test6", test6, 51 51 __u64, a, void *, b, short, c, int, d, void *, e, __u64, f, 52 52 int, ret)
+2 -2
tools/testing/selftests/bpf/progs/test_mmap.c
··· 15 15 __type(value, __u64); 16 16 } data_map SEC(".maps"); 17 17 18 - static volatile __u64 in_val; 19 - static volatile __u64 out_val; 18 + __u64 in_val = 0; 19 + __u64 out_val = 0; 20 20 21 21 SEC("raw_tracepoint/sys_enter") 22 22 int test_mmap(void *ctx)