Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2020-04-10

The following pull-request contains BPF updates for your *net* tree.

We've added 13 non-merge commits during the last 7 day(s) which contain
a total of 13 files changed, 137 insertions(+), 43 deletions(-).

The main changes are:

1) JIT code emission fixes for riscv and arm32, from Luke Nelson and Xi Wang.

2) Disable vmlinux BTF info if GCC_PLUGIN_RANDSTRUCT is used, from Slava Bacherikov.

3) Fix oob write in AF_XDP when meta data is used, from Li RongQing.

4) Fix bpf_get_link_xdp_id() handling on single prog when flags are specified,
from Andrey Ignatov.

5) Fix sk_assign() BPF helper for request sockets that can have sk_reuseport
field uninitialized, from Joe Stringer.

6) Fix mprotect() test case for the BPF LSM, from KP Singh.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+137 -43
+10 -2
arch/arm/net/bpf_jit_32.c
··· 929 929 rd = arm_bpf_get_reg64(dst, tmp, ctx); 930 930 931 931 /* Do LSR operation */ 932 - if (val < 32) { 932 + if (val == 0) { 933 + /* An immediate value of 0 encodes a shift amount of 32 934 + * for LSR. To shift by 0, don't do anything. 935 + */ 936 + } else if (val < 32) { 933 937 emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); 934 938 emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); 935 939 emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx); ··· 959 955 rd = arm_bpf_get_reg64(dst, tmp, ctx); 960 956 961 957 /* Do ARSH operation */ 962 - if (val < 32) { 958 + if (val == 0) { 959 + /* An immediate value of 0 encodes a shift amount of 32 960 + * for ASR. To shift by 0, don't do anything. 961 + */ 962 + } else if (val < 32) { 963 963 emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); 964 964 emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); 965 965 emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
+1 -1
arch/riscv/Kconfig
··· 55 55 select ARCH_HAS_PTE_SPECIAL 56 56 select ARCH_HAS_MMIOWB 57 57 select ARCH_HAS_DEBUG_VIRTUAL 58 - select HAVE_EBPF_JIT 58 + select HAVE_EBPF_JIT if MMU 59 59 select EDAC_SUPPORT 60 60 select ARCH_HAS_GIGANTIC_PAGE 61 61 select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
+32 -17
arch/riscv/net/bpf_jit_comp64.c
··· 110 110 return -(1L << 31) <= val && val < (1L << 31); 111 111 } 112 112 113 + static bool in_auipc_jalr_range(s64 val) 114 + { 115 + /* 116 + * auipc+jalr can reach any signed PC-relative offset in the range 117 + * [-2^31 - 2^11, 2^31 - 2^11). 118 + */ 119 + return (-(1L << 31) - (1L << 11)) <= val && 120 + val < ((1L << 31) - (1L << 11)); 121 + } 122 + 113 123 static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) 114 124 { 115 125 /* Note that the immediate from the add is sign-extended, ··· 390 380 *rd = RV_REG_T2; 391 381 } 392 382 393 - static void emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr, 394 - struct rv_jit_context *ctx) 383 + static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr, 384 + struct rv_jit_context *ctx) 395 385 { 396 386 s64 upper, lower; 397 387 398 388 if (rvoff && is_21b_int(rvoff) && !force_jalr) { 399 389 emit(rv_jal(rd, rvoff >> 1), ctx); 400 - return; 390 + return 0; 391 + } else if (in_auipc_jalr_range(rvoff)) { 392 + upper = (rvoff + (1 << 11)) >> 12; 393 + lower = rvoff & 0xfff; 394 + emit(rv_auipc(RV_REG_T1, upper), ctx); 395 + emit(rv_jalr(rd, RV_REG_T1, lower), ctx); 396 + return 0; 401 397 } 402 398 403 - upper = (rvoff + (1 << 11)) >> 12; 404 - lower = rvoff & 0xfff; 405 - emit(rv_auipc(RV_REG_T1, upper), ctx); 406 - emit(rv_jalr(rd, RV_REG_T1, lower), ctx); 399 + pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff); 400 + return -ERANGE; 407 401 } 408 402 409 403 static bool is_signed_bpf_cond(u8 cond) ··· 421 407 s64 off = 0; 422 408 u64 ip; 423 409 u8 rd; 410 + int ret; 424 411 425 412 if (addr && ctx->insns) { 426 413 ip = (u64)(long)(ctx->insns + ctx->ninsns); 427 414 off = addr - ip; 428 - if (!is_32b_int(off)) { 429 - pr_err("bpf-jit: target call addr %pK is out of range\n", 430 - (void *)addr); 431 - return -ERANGE; 432 - } 433 415 } 434 416 435 - emit_jump_and_link(RV_REG_RA, off, !fixed, ctx); 417 + ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx); 418 + if (ret) 419 + return ret; 436 420 rd = bpf_to_rv_reg(BPF_REG_0, ctx); 437 421 emit(rv_addi(rd, RV_REG_A0, 0), ctx); 438 422 return 0; ··· 441 429 { 442 430 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 || 443 431 BPF_CLASS(insn->code) == BPF_JMP; 444 - int s, e, rvoff, i = insn - ctx->prog->insnsi; 432 + int s, e, rvoff, ret, i = insn - ctx->prog->insnsi; 445 433 struct bpf_prog_aux *aux = ctx->prog->aux; 446 434 u8 rd = -1, rs = -1, code = insn->code; 447 435 s16 off = insn->off; ··· 711 699 /* JUMP off */ 712 700 case BPF_JMP | BPF_JA: 713 701 rvoff = rv_offset(i, off, ctx); 714 - emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx); 702 + ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx); 703 + if (ret) 704 + return ret; 715 705 break; 716 706 717 707 /* IF (dst COND src) JUMP off */ ··· 815 801 case BPF_JMP | BPF_CALL: 816 802 { 817 803 bool fixed; 818 - int ret; 819 804 u64 addr; 820 805 821 806 mark_call(ctx); ··· 839 826 break; 840 827 841 828 rvoff = epilogue_offset(ctx); 842 - emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx); 829 + ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx); 830 + if (ret) 831 + return ret; 843 832 break; 844 833 845 834 /* dst = imm64 */
+1 -1
kernel/bpf/bpf_lru_list.h
··· 30 30 struct bpf_lru_list { 31 31 struct list_head lists[NR_BPF_LRU_LIST_T]; 32 32 unsigned int counts[NR_BPF_LRU_LIST_COUNT]; 33 - /* The next inacitve list rotation starts from here */ 33 + /* The next inactive list rotation starts from here */ 34 34 struct list_head *next_inactive_rotation; 35 35 36 36 raw_spinlock_t lock ____cacheline_aligned_in_smp;
+2
lib/Kconfig.debug
··· 242 242 config DEBUG_INFO_BTF 243 243 bool "Generate BTF typeinfo" 244 244 depends on DEBUG_INFO 245 + depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED 246 + depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST 245 247 help 246 248 Generate deduplicated BTF type information from DWARF debug info. 247 249 Turning this on expects presence of pahole tool, which will convert
+1 -1
net/core/filter.c
··· 5925 5925 return -EOPNOTSUPP; 5926 5926 if (unlikely(dev_net(skb->dev) != sock_net(sk))) 5927 5927 return -ENETUNREACH; 5928 - if (unlikely(sk->sk_reuseport)) 5928 + if (unlikely(sk_fullsock(sk) && sk->sk_reuseport)) 5929 5929 return -ESOCKTNOSUPPORT; 5930 5930 if (sk_is_refcounted(sk) && 5931 5931 unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
+1 -1
net/core/sock.c
··· 1872 1872 * as not suitable for copying when cloning. 1873 1873 */ 1874 1874 if (sk_user_data_is_nocopy(newsk)) 1875 - RCU_INIT_POINTER(newsk->sk_user_data, NULL); 1875 + newsk->sk_user_data = NULL; 1876 1876 1877 1877 newsk->sk_err = 0; 1878 1878 newsk->sk_err_soft = 0;
+3 -2
net/xdp/xsk.c
··· 131 131 u64 page_start = addr & ~(PAGE_SIZE - 1); 132 132 u64 first_len = PAGE_SIZE - (addr - page_start); 133 133 134 - memcpy(to_buf, from_buf, first_len + metalen); 135 - memcpy(next_pg_addr, from_buf + first_len, len - first_len); 134 + memcpy(to_buf, from_buf, first_len); 135 + memcpy(next_pg_addr, from_buf + first_len, 136 + len + metalen - first_len); 136 137 137 138 return; 138 139 }
+3 -3
tools/lib/bpf/netlink.c
··· 142 142 struct ifinfomsg ifinfo; 143 143 char attrbuf[64]; 144 144 } req; 145 - __u32 nl_pid; 145 + __u32 nl_pid = 0; 146 146 147 147 sock = libbpf_netlink_open(&nl_pid); 148 148 if (sock < 0) ··· 288 288 { 289 289 struct xdp_id_md xdp_id = {}; 290 290 int sock, ret; 291 - __u32 nl_pid; 291 + __u32 nl_pid = 0; 292 292 __u32 mask; 293 293 294 294 if (flags & ~XDP_FLAGS_MASK || !info_size) ··· 321 321 322 322 static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags) 323 323 { 324 - if (info->attach_mode != XDP_ATTACHED_MULTI) 324 + if (info->attach_mode != XDP_ATTACHED_MULTI && !flags) 325 325 return info->prog_id; 326 326 if (flags & XDP_FLAGS_DRV_MODE) 327 327 return info->drv_prog_id;
+9 -9
tools/testing/selftests/bpf/prog_tests/test_lsm.c
··· 15 15 16 16 char *CMD_ARGS[] = {"true", NULL}; 17 17 18 - int heap_mprotect(void) 18 + #define GET_PAGE_ADDR(ADDR, PAGE_SIZE) \ 19 + (char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1)) 20 + 21 + int stack_mprotect(void) 19 22 { 20 23 void *buf; 21 24 long sz; ··· 28 25 if (sz < 0) 29 26 return sz; 30 27 31 - buf = memalign(sz, 2 * sz); 32 - if (buf == NULL) 33 - return -ENOMEM; 34 - 35 - ret = mprotect(buf, sz, PROT_READ | PROT_WRITE | PROT_EXEC); 36 - free(buf); 28 + buf = alloca(sz * 3); 29 + ret = mprotect(GET_PAGE_ADDR(buf, sz), sz, 30 + PROT_READ | PROT_WRITE | PROT_EXEC); 37 31 return ret; 38 32 } 39 33 ··· 73 73 74 74 skel->bss->monitored_pid = getpid(); 75 75 76 - err = heap_mprotect(); 77 - if (CHECK(errno != EPERM, "heap_mprotect", "want errno=EPERM, got %d\n", 76 + err = stack_mprotect(); 77 + if (CHECK(errno != EPERM, "stack_mprotect", "want err=EPERM, got %d\n", 78 78 errno)) 79 79 goto close_prog; 80 80
+68
tools/testing/selftests/bpf/prog_tests/xdp_info.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/if_link.h> 3 + #include <test_progs.h> 4 + 5 + #define IFINDEX_LO 1 6 + 7 + void test_xdp_info(void) 8 + { 9 + __u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id; 10 + const char *file = "./xdp_dummy.o"; 11 + struct bpf_prog_info info = {}; 12 + struct bpf_object *obj; 13 + int err, prog_fd; 14 + 15 + /* Get prog_id for XDP_ATTACHED_NONE mode */ 16 + 17 + err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0); 18 + if (CHECK(err, "get_xdp_none", "errno=%d\n", errno)) 19 + return; 20 + if (CHECK(prog_id, "prog_id_none", "unexpected prog_id=%u\n", prog_id)) 21 + return; 22 + 23 + err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE); 24 + if (CHECK(err, "get_xdp_none_skb", "errno=%d\n", errno)) 25 + return; 26 + if (CHECK(prog_id, "prog_id_none_skb", "unexpected prog_id=%u\n", 27 + prog_id)) 28 + return; 29 + 30 + /* Setup prog */ 31 + 32 + err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 33 + if (CHECK_FAIL(err)) 34 + return; 35 + 36 + err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); 37 + if (CHECK(err, "get_prog_info", "errno=%d\n", errno)) 38 + goto out_close; 39 + 40 + err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); 41 + if (CHECK(err, "set_xdp_skb", "errno=%d\n", errno)) 42 + goto out_close; 43 + 44 + /* Get prog_id for single prog mode */ 45 + 46 + err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0); 47 + if (CHECK(err, "get_xdp", "errno=%d\n", errno)) 48 + goto out; 49 + if (CHECK(prog_id != info.id, "prog_id", "prog_id not available\n")) 50 + goto out; 51 + 52 + err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE); 53 + if (CHECK(err, "get_xdp_skb", "errno=%d\n", errno)) 54 + goto out; 55 + if (CHECK(prog_id != info.id, "prog_id_skb", "prog_id not available\n")) 56 + goto out; 57 + 58 + err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_DRV_MODE); 59 + if (CHECK(err, "get_xdp_drv", "errno=%d\n", errno)) 60 + goto out; 61 + if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id)) 62 + goto out; 63 + 64 + out: 65 + bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0); 66 + out_close: 67 + bpf_object__close(obj); 68 + }
+4 -4
tools/testing/selftests/bpf/progs/lsm.c
··· 23 23 return ret; 24 24 25 25 __u32 pid = bpf_get_current_pid_tgid() >> 32; 26 - int is_heap = 0; 26 + int is_stack = 0; 27 27 28 - is_heap = (vma->vm_start >= vma->vm_mm->start_brk && 29 - vma->vm_end <= vma->vm_mm->brk); 28 + is_stack = (vma->vm_start <= vma->vm_mm->start_stack && 29 + vma->vm_end >= vma->vm_mm->start_stack); 30 30 31 - if (is_heap && monitored_pid == pid) { 31 + if (is_stack && monitored_pid == pid) { 32 32 mprotect_count++; 33 33 ret = -EPERM; 34 34 }
+2 -2
tools/testing/selftests/bpf/verifier/bounds.c
··· 501 501 .result = REJECT 502 502 }, 503 503 { 504 - "bounds check mixed 32bit and 64bit arithmatic. test1", 504 + "bounds check mixed 32bit and 64bit arithmetic. test1", 505 505 .insns = { 506 506 BPF_MOV64_IMM(BPF_REG_0, 0), 507 507 BPF_MOV64_IMM(BPF_REG_1, -1), ··· 520 520 .result = ACCEPT 521 521 }, 522 522 { 523 - "bounds check mixed 32bit and 64bit arithmatic. test2", 523 + "bounds check mixed 32bit and 64bit arithmetic. test2", 524 524 .insns = { 525 525 BPF_MOV64_IMM(BPF_REG_0, 0), 526 526 BPF_MOV64_IMM(BPF_REG_1, -1),