Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: BPF test_verifier selftests for bpf_loop inlining

Adds a number of test cases for the BPF selftests test_verifier binary that
check how the bpf_loop inline transformation rewrites the BPF program. The
following cases are covered:
- happy path
- no-rewrite when flags is non-zero
- no-rewrite when callback is non-constant
- subprogno in insn_aux is updated correctly when dead sub-programs
are removed
- check that correct stack offsets are assigned for spilling of R6-R8
registers

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/r/20220620235344.569325-5-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Eduard Zingerman and committed by
Alexei Starovoitov
f8acfdd0 1ade2371

+252
+252
tools/testing/selftests/bpf/verifier/bpf_loop_inline.c
··· 1 + #define BTF_TYPES \ 2 + .btf_strings = "\0int\0i\0ctx\0callback\0main\0", \ 3 + .btf_types = { \ 4 + /* 1: int */ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), \ 5 + /* 2: int* */ BTF_PTR_ENC(1), \ 6 + /* 3: void* */ BTF_PTR_ENC(0), \ 7 + /* 4: int __(void*) */ BTF_FUNC_PROTO_ENC(1, 1), \ 8 + BTF_FUNC_PROTO_ARG_ENC(7, 3), \ 9 + /* 5: int __(int, int*) */ BTF_FUNC_PROTO_ENC(1, 2), \ 10 + BTF_FUNC_PROTO_ARG_ENC(5, 1), \ 11 + BTF_FUNC_PROTO_ARG_ENC(7, 2), \ 12 + /* 6: main */ BTF_FUNC_ENC(20, 4), \ 13 + /* 7: callback */ BTF_FUNC_ENC(11, 5), \ 14 + BTF_END_RAW \ 15 + } 16 + 17 + #define MAIN_TYPE 6 18 + #define CALLBACK_TYPE 7 19 + 20 + /* can't use BPF_CALL_REL, jit_subprogs adjusts IMM & OFF 21 + * fields for pseudo calls 22 + */ 23 + #define PSEUDO_CALL_INSN() \ 24 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, \ 25 + INSN_OFF_MASK, INSN_IMM_MASK) 26 + 27 + /* can't use BPF_FUNC_loop constant, 28 + * do_mix_fixups adjusts the IMM field 29 + */ 30 + #define HELPER_CALL_INSN() \ 31 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, INSN_OFF_MASK, INSN_IMM_MASK) 32 + 33 + { 34 + "inline simple bpf_loop call", 35 + .insns = { 36 + /* main */ 37 + /* force verifier state branching to verify logic on first and 38 + * subsequent bpf_loop insn processing steps 39 + */ 40 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), 41 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 777, 2), 42 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1), 43 + BPF_JMP_IMM(BPF_JA, 0, 0, 1), 44 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2), 45 + 46 + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 6), 47 + BPF_RAW_INSN(0, 0, 0, 0, 0), 48 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0), 49 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0), 50 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop), 51 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), 52 + BPF_EXIT_INSN(), 53 + /* callback */ 54 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1), 55 + BPF_EXIT_INSN(), 56 + }, 57 + .expected_insns = { PSEUDO_CALL_INSN() }, 58 
+ .unexpected_insns = { HELPER_CALL_INSN() }, 59 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 60 + .result = ACCEPT, 61 + .runs = 0, 62 + .func_info = { { 0, MAIN_TYPE }, { 12, CALLBACK_TYPE } }, 63 + .func_info_cnt = 2, 64 + BTF_TYPES 65 + }, 66 + { 67 + "don't inline bpf_loop call, flags non-zero", 68 + .insns = { 69 + /* main */ 70 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), 71 + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_0), 72 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), 73 + BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0), 74 + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 9), 75 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0), 76 + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 0), 77 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1), 78 + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 7), 79 + BPF_RAW_INSN(0, 0, 0, 0, 0), 80 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0), 81 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop), 82 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), 83 + BPF_EXIT_INSN(), 84 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 1), 85 + BPF_JMP_IMM(BPF_JA, 0, 0, -10), 86 + /* callback */ 87 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1), 88 + BPF_EXIT_INSN(), 89 + }, 90 + .expected_insns = { HELPER_CALL_INSN() }, 91 + .unexpected_insns = { PSEUDO_CALL_INSN() }, 92 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 93 + .result = ACCEPT, 94 + .runs = 0, 95 + .func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } }, 96 + .func_info_cnt = 2, 97 + BTF_TYPES 98 + }, 99 + { 100 + "don't inline bpf_loop call, callback non-constant", 101 + .insns = { 102 + /* main */ 103 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), 104 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 777, 4), /* pick a random callback */ 105 + 106 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1), 107 + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 10), 108 + BPF_RAW_INSN(0, 0, 0, 0, 0), 109 + BPF_JMP_IMM(BPF_JA, 0, 0, 3), 110 + 111 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1), 112 + 
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 8), 113 + BPF_RAW_INSN(0, 0, 0, 0, 0), 114 + 115 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0), 116 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0), 117 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop), 118 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), 119 + BPF_EXIT_INSN(), 120 + /* callback */ 121 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1), 122 + BPF_EXIT_INSN(), 123 + /* callback #2 */ 124 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1), 125 + BPF_EXIT_INSN(), 126 + }, 127 + .expected_insns = { HELPER_CALL_INSN() }, 128 + .unexpected_insns = { PSEUDO_CALL_INSN() }, 129 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 130 + .result = ACCEPT, 131 + .runs = 0, 132 + .func_info = { 133 + { 0, MAIN_TYPE }, 134 + { 14, CALLBACK_TYPE }, 135 + { 16, CALLBACK_TYPE } 136 + }, 137 + .func_info_cnt = 3, 138 + BTF_TYPES 139 + }, 140 + { 141 + "bpf_loop_inline and a dead func", 142 + .insns = { 143 + /* main */ 144 + 145 + /* A reference to callback #1 to make verifier count it as a func. 146 + * This reference is overwritten below and callback #1 is dead. 
147 + */ 148 + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 9), 149 + BPF_RAW_INSN(0, 0, 0, 0, 0), 150 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1), 151 + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 8), 152 + BPF_RAW_INSN(0, 0, 0, 0, 0), 153 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0), 154 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0), 155 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop), 156 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), 157 + BPF_EXIT_INSN(), 158 + /* callback */ 159 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1), 160 + BPF_EXIT_INSN(), 161 + /* callback #2 */ 162 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1), 163 + BPF_EXIT_INSN(), 164 + }, 165 + .expected_insns = { PSEUDO_CALL_INSN() }, 166 + .unexpected_insns = { HELPER_CALL_INSN() }, 167 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 168 + .result = ACCEPT, 169 + .runs = 0, 170 + .func_info = { 171 + { 0, MAIN_TYPE }, 172 + { 10, CALLBACK_TYPE }, 173 + { 12, CALLBACK_TYPE } 174 + }, 175 + .func_info_cnt = 3, 176 + BTF_TYPES 177 + }, 178 + { 179 + "bpf_loop_inline stack locations for loop vars", 180 + .insns = { 181 + /* main */ 182 + BPF_ST_MEM(BPF_W, BPF_REG_10, -12, 0x77), 183 + /* bpf_loop call #1 */ 184 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1), 185 + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 22), 186 + BPF_RAW_INSN(0, 0, 0, 0, 0), 187 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0), 188 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0), 189 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop), 190 + /* bpf_loop call #2 */ 191 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2), 192 + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 16), 193 + BPF_RAW_INSN(0, 0, 0, 0, 0), 194 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0), 195 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0), 196 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop), 197 + /* call func and exit */ 198 + BPF_CALL_REL(2), 199 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), 200 + BPF_EXIT_INSN(), 201 + /* 
func */ 202 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -32, 0x55), 203 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2), 204 + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 6), 205 + BPF_RAW_INSN(0, 0, 0, 0, 0), 206 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0), 207 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0), 208 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop), 209 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), 210 + BPF_EXIT_INSN(), 211 + /* callback */ 212 + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1), 213 + BPF_EXIT_INSN(), 214 + }, 215 + .expected_insns = { 216 + BPF_ST_MEM(BPF_W, BPF_REG_10, -12, 0x77), 217 + SKIP_INSNS(), 218 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -40), 219 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -32), 220 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -24), 221 + SKIP_INSNS(), 222 + /* offsets are the same as in the first call */ 223 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -40), 224 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -32), 225 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -24), 226 + SKIP_INSNS(), 227 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -32, 0x55), 228 + SKIP_INSNS(), 229 + /* offsets differ from main because of different offset 230 + * in BPF_ST_MEM instruction 231 + */ 232 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -56), 233 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -48), 234 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -40), 235 + }, 236 + .unexpected_insns = { HELPER_CALL_INSN() }, 237 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 238 + .result = ACCEPT, 239 + .func_info = { 240 + { 0, MAIN_TYPE }, 241 + { 16, MAIN_TYPE }, 242 + { 25, CALLBACK_TYPE }, 243 + }, 244 + .func_info_cnt = 3, 245 + BTF_TYPES 246 + }, 247 + 248 + #undef HELPER_CALL_INSN 249 + #undef PSEUDO_CALL_INSN 250 + #undef CALLBACK_TYPE 251 + #undef MAIN_TYPE 252 + #undef BTF_TYPES