Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'bpf: refine retval for bpf_get_task_stack helper'

Dave Marchevsky says:

====================

Similarly to the bpf_get_stack helper, bpf_get_task_stack's return value
can be more tightly bound by the verifier - it's the number of bytes
written to a user-supplied buffer, or a negative error value. Currently
the verifier believes bpf_get_task_stack's retval bounds to be unknown,
requiring extraneous bounds checking to remedy.

Adding it to do_refine_retval_range fixes the issue, as evidenced by
new selftests which fail to load if retval bounds are not refined.

v2: Addressed comment nit in patch 3
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>

+72
+1
kernel/bpf/verifier.c
··· 5808 5808 5809 5809 if (ret_type != RET_INTEGER || 5810 5810 (func_id != BPF_FUNC_get_stack && 5811 + func_id != BPF_FUNC_get_task_stack && 5811 5812 func_id != BPF_FUNC_probe_read_str && 5812 5813 func_id != BPF_FUNC_probe_read_kernel_str && 5813 5814 func_id != BPF_FUNC_probe_read_user_str))
+1
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
··· 147 147 return; 148 148 149 149 do_dummy_read(skel->progs.dump_task_stack); 150 + do_dummy_read(skel->progs.get_task_user_stacks); 150 151 151 152 bpf_iter_task_stack__destroy(skel); 152 153 }
+27
tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
··· 35 35 36 36 return 0; 37 37 } 38 + 39 + SEC("iter/task") 40 + int get_task_user_stacks(struct bpf_iter__task *ctx) 41 + { 42 + struct seq_file *seq = ctx->meta->seq; 43 + struct task_struct *task = ctx->task; 44 + uint64_t buf_sz = 0; 45 + int64_t res; 46 + 47 + if (task == (void *)0) 48 + return 0; 49 + 50 + res = bpf_get_task_stack(task, entries, 51 + MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, BPF_F_USER_STACK); 52 + if (res <= 0) 53 + return 0; 54 + 55 + buf_sz += res; 56 + 57 + /* If the verifier doesn't refine bpf_get_task_stack res, and instead 58 + * assumes res is entirely unknown, this program will fail to load as 59 + * the verifier will believe that max buf_sz value allows reading 60 + * past the end of entries in bpf_seq_write call 61 + */ 62 + bpf_seq_write(seq, &entries, buf_sz); 63 + return 0; 64 + }
+43
tools/testing/selftests/bpf/verifier/bpf_get_stack.c
··· 42 42 .result = ACCEPT, 43 43 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 44 44 }, 45 + { 46 + "bpf_get_task_stack return R0 range is refined", 47 + .insns = { 48 + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 49 + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_6, 0), // ctx->meta->seq 50 + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, 8), // ctx->task 51 + BPF_LD_MAP_FD(BPF_REG_1, 0), // fixup_map_array_48b 52 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 53 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 54 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 55 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 56 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 57 + BPF_MOV64_IMM(BPF_REG_0, 0), 58 + BPF_EXIT_INSN(), 59 + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2), 60 + BPF_MOV64_IMM(BPF_REG_0, 0), 61 + BPF_EXIT_INSN(), 62 + 63 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 64 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 65 + BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), // keep buf for seq_write 66 + BPF_MOV64_IMM(BPF_REG_3, 48), 67 + BPF_MOV64_IMM(BPF_REG_4, 0), 68 + BPF_EMIT_CALL(BPF_FUNC_get_task_stack), 69 + BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2), 70 + BPF_MOV64_IMM(BPF_REG_0, 0), 71 + BPF_EXIT_INSN(), 72 + 73 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 74 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), 75 + BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), 76 + BPF_EMIT_CALL(BPF_FUNC_seq_write), 77 + 78 + BPF_MOV64_IMM(BPF_REG_0, 0), 79 + BPF_EXIT_INSN(), 80 + }, 81 + .result = ACCEPT, 82 + .prog_type = BPF_PROG_TYPE_TRACING, 83 + .expected_attach_type = BPF_TRACE_ITER, 84 + .kfunc = "task", 85 + .runs = -1, // Don't run, just load 86 + .fixup_map_array_48b = { 3 }, 87 + },