Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Propagate stack bounds to registers in atomics w/ BPF_FETCH

When BPF_FETCH is set, atomic instructions load a value from memory
into a register. The current verifier code first checks via
check_mem_access whether we can access the memory, and then checks
via check_reg_arg whether we can write into the register.

For loads, check_reg_arg has the side effect of marking the
register's value as unknown, and check_mem_access has the side effect
of propagating bounds from memory to the register. This currently only
takes effect for stack memory.

Therefore with the current order, bounds information is thrown away,
but by simply reversing the order of check_reg_arg
vs. check_mem_access, we can instead propagate bounds smartly.

A simple test is added with an infinite loop that can only be proved
unreachable if this propagation is present. This is implemented both
with C and directly in test_verifier using assembly.

Suggested-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210202135002.4024825-1-jackmanb@google.com

authored by

Brendan Jackman and committed by
Alexei Starovoitov
37086bfd 058107ab

+84 -14
+18 -14
kernel/bpf/verifier.c
··· 3665 3665 return -EACCES; 3666 3666 } 3667 3667 3668 + if (insn->imm & BPF_FETCH) { 3669 + if (insn->imm == BPF_CMPXCHG) 3670 + load_reg = BPF_REG_0; 3671 + else 3672 + load_reg = insn->src_reg; 3673 + 3674 + /* check and record load of old value */ 3675 + err = check_reg_arg(env, load_reg, DST_OP); 3676 + if (err) 3677 + return err; 3678 + } else { 3679 + /* This instruction accesses a memory location but doesn't 3680 + * actually load it into a register. 3681 + */ 3682 + load_reg = -1; 3683 + } 3684 + 3668 3685 /* check whether we can read the memory */ 3669 3686 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3670 - BPF_SIZE(insn->code), BPF_READ, -1, true); 3687 + BPF_SIZE(insn->code), BPF_READ, load_reg, true); 3671 3688 if (err) 3672 3689 return err; 3673 3690 3674 3691 /* check whether we can write into the same memory */ 3675 3692 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3676 3693 BPF_SIZE(insn->code), BPF_WRITE, -1, true); 3677 - if (err) 3678 - return err; 3679 - 3680 - if (!(insn->imm & BPF_FETCH)) 3681 - return 0; 3682 - 3683 - if (insn->imm == BPF_CMPXCHG) 3684 - load_reg = BPF_REG_0; 3685 - else 3686 - load_reg = insn->src_reg; 3687 - 3688 - /* check and record load of old value */ 3689 - err = check_reg_arg(env, load_reg, DST_OP); 3690 3694 if (err) 3691 3695 return err; 3692 3696
+15
tools/testing/selftests/bpf/prog_tests/atomic_bounds.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <test_progs.h> 4 + 5 + #include "atomic_bounds.skel.h" 6 + 7 + void test_atomic_bounds(void) 8 + { 9 + struct atomic_bounds *skel; 10 + __u32 duration = 0; 11 + 12 + skel = atomic_bounds__open_and_load(); 13 + if (CHECK(!skel, "skel_load", "couldn't load program\n")) 14 + return; 15 + }
+24
tools/testing/selftests/bpf/progs/atomic_bounds.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/bpf.h> 3 + #include <bpf/bpf_helpers.h> 4 + #include <bpf/bpf_tracing.h> 5 + #include <stdbool.h> 6 + 7 + #ifdef ENABLE_ATOMICS_TESTS 8 + bool skip_tests __attribute((__section__(".data"))) = false; 9 + #else 10 + bool skip_tests = true; 11 + #endif 12 + 13 + SEC("fentry/bpf_fentry_test1") 14 + int BPF_PROG(sub, int x) 15 + { 16 + #ifdef ENABLE_ATOMICS_TESTS 17 + int a = 0; 18 + int b = __sync_fetch_and_add(&a, 1); 19 + /* b is certainly 0 here. Can the verifier tell? */ 20 + while (b) 21 + continue; 22 + #endif 23 + return 0; 24 + }
+27
tools/testing/selftests/bpf/verifier/atomic_bounds.c
··· 1 + { 2 + "BPF_ATOMIC bounds propagation, mem->reg", 3 + .insns = { 4 + /* a = 0; */ 5 + /* 6 + * Note this is implemented with two separate instructions, 7 + * where you might think one would suffice: 8 + * 9 + * BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 10 + * 11 + * This is because BPF_ST_MEM doesn't seem to set the stack slot 12 + * type to 0 when storing an immediate. 13 + */ 14 + BPF_MOV64_IMM(BPF_REG_0, 0), 15 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 16 + /* b = atomic_fetch_add(&a, 1); */ 17 + BPF_MOV64_IMM(BPF_REG_1, 1), 18 + BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8), 19 + /* Verifier should be able to tell that this infinite loop isn't reachable. */ 20 + /* if (b) while (true) continue; */ 21 + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -1), 22 + BPF_EXIT_INSN(), 23 + }, 24 + .result = ACCEPT, 25 + .result_unpriv = REJECT, 26 + .errstr_unpriv = "back-edge", 27 + },