Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-11-02

The following pull-request contains BPF updates for your *net* tree.

We've added 6 non-merge commits during the last 6 day(s) which contain
a total of 8 files changed, 35 insertions(+), 9 deletions(-).

The main changes are:

1) Fix ppc BPF JIT's tail call implementation by performing a second pass
to gather a stable JIT context before opcode emission, from Eric Dumazet.

2) Fix build of BPF samples sys_perf_event_open() usage to compile out
unavailable test_attr__{enabled,open} checks. Also fix potential overflows
in bpf_map_{area_alloc,charge_init} on 32 bit archs, from Björn Töpel.

3) Fix narrow loads of bpf_sysctl context fields with offset > 0 on big endian
archs like s390x and also improve the test coverage, from Ilya Leoshkevich.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+35 -9
+1
MAINTAINERS
··· 3053 3053 R: Martin KaFai Lau <kafai@fb.com> 3054 3054 R: Song Liu <songliubraving@fb.com> 3055 3055 R: Yonghong Song <yhs@fb.com> 3056 + R: Andrii Nakryiko <andriin@fb.com> 3056 3057 L: netdev@vger.kernel.org 3057 3058 L: bpf@vger.kernel.org 3058 3059 T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
+13
arch/powerpc/net/bpf_jit_comp64.c
··· 1142 1142 } 1143 1143 1144 1144 /* 1145 + * If we have seen a tail call, we need a second pass. 1146 + * This is because bpf_jit_emit_common_epilogue() is called 1147 + * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen. 1148 + */ 1149 + if (cgctx.seen & SEEN_TAILCALL) { 1150 + cgctx.idx = 0; 1151 + if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) { 1152 + fp = org_fp; 1153 + goto out_addrs; 1154 + } 1155 + } 1156 + 1157 + /* 1145 1158 * Pretend to build prologue, given the features we've seen. This will 1146 1159 * update ctgtx.idx as it pretends to output instructions, then we can 1147 1160 * calculate total size from idx.
+2 -2
include/linux/bpf.h
··· 656 656 void bpf_map_put(struct bpf_map *map); 657 657 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); 658 658 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); 659 - int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size); 659 + int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size); 660 660 void bpf_map_charge_finish(struct bpf_map_memory *mem); 661 661 void bpf_map_charge_move(struct bpf_map_memory *dst, 662 662 struct bpf_map_memory *src); 663 - void *bpf_map_area_alloc(size_t size, int numa_node); 663 + void *bpf_map_area_alloc(u64 size, int numa_node); 664 664 void bpf_map_area_free(void *base); 665 665 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 666 666
+2 -2
kernel/bpf/cgroup.c
··· 1311 1311 return false; 1312 1312 1313 1313 switch (off) { 1314 - case offsetof(struct bpf_sysctl, write): 1314 + case bpf_ctx_range(struct bpf_sysctl, write): 1315 1315 if (type != BPF_READ) 1316 1316 return false; 1317 1317 bpf_ctx_record_field_size(info, size_default); 1318 1318 return bpf_ctx_narrow_access_ok(off, size, size_default); 1319 - case offsetof(struct bpf_sysctl, file_pos): 1319 + case bpf_ctx_range(struct bpf_sysctl, file_pos): 1320 1320 if (type == BPF_READ) { 1321 1321 bpf_ctx_record_field_size(info, size_default); 1322 1322 return bpf_ctx_narrow_access_ok(off, size, size_default);
+5 -2
kernel/bpf/syscall.c
··· 126 126 return map; 127 127 } 128 128 129 - void *bpf_map_area_alloc(size_t size, int numa_node) 129 + void *bpf_map_area_alloc(u64 size, int numa_node) 130 130 { 131 131 /* We really just want to fail instead of triggering OOM killer 132 132 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, ··· 140 140 141 141 const gfp_t flags = __GFP_NOWARN | __GFP_ZERO; 142 142 void *area; 143 + 144 + if (size >= SIZE_MAX) 145 + return NULL; 143 146 144 147 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 145 148 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, ··· 200 197 atomic_long_sub(pages, &user->locked_vm); 201 198 } 202 199 203 - int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size) 200 + int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size) 204 201 { 205 202 u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT; 206 203 struct user_struct *user;
+1
samples/bpf/Makefile
··· 176 176 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/ 177 177 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include 178 178 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf 179 + KBUILD_HOSTCFLAGS += -DHAVE_ATTR_TEST=0 179 180 180 181 HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable 181 182
+4 -2
tools/perf/perf-sys.h
··· 15 15 void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, 16 16 int fd, int group_fd, unsigned long flags); 17 17 18 - #define HAVE_ATTR_TEST 18 + #ifndef HAVE_ATTR_TEST 19 + #define HAVE_ATTR_TEST 1 20 + #endif 19 21 20 22 static inline int 21 23 sys_perf_event_open(struct perf_event_attr *attr, ··· 29 27 fd = syscall(__NR_perf_event_open, attr, pid, cpu, 30 28 group_fd, flags); 31 29 32 - #ifdef HAVE_ATTR_TEST 30 + #if HAVE_ATTR_TEST 33 31 if (unlikely(test_attr__enabled)) 34 32 test_attr__open(attr, pid, cpu, fd, group_fd, flags); 35 33 #endif
+7 -1
tools/testing/selftests/bpf/test_sysctl.c
··· 161 161 .descr = "ctx:file_pos sysctl:read read ok narrow", 162 162 .insns = { 163 163 /* If (file_pos == X) */ 164 + #if __BYTE_ORDER == __LITTLE_ENDIAN 164 165 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1, 165 166 offsetof(struct bpf_sysctl, file_pos)), 166 - BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2), 167 + #else 168 + BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1, 169 + offsetof(struct bpf_sysctl, file_pos) + 3), 170 + #endif 171 + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 4, 2), 167 172 168 173 /* return ALLOW; */ 169 174 BPF_MOV64_IMM(BPF_REG_0, 1), ··· 181 176 .attach_type = BPF_CGROUP_SYSCTL, 182 177 .sysctl = "kernel/ostype", 183 178 .open_flags = O_RDONLY, 179 + .seek = 4, 184 180 .result = SUCCESS, 185 181 }, 186 182 {