Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: two scale tests

Add two tests to check that a sequence of 1024 jumps is verifiable.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>

Authored by Alexei Starovoitov and committed by Daniel Borkmann.
08de198c 3da6e7e4

+88
+70
tools/testing/selftests/bpf/test_verifier.c
··· 208 208 self->retval = (uint32_t)res; 209 209 } 210 210 211 + /* test the sequence of 1k jumps */ 212 + static void bpf_fill_scale1(struct bpf_test *self) 213 + { 214 + struct bpf_insn *insn = self->fill_insns; 215 + int i = 0, k = 0; 216 + 217 + insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); 218 + /* test to check that the sequence of 1024 jumps is acceptable */ 219 + while (k++ < 1024) { 220 + insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 221 + BPF_FUNC_get_prandom_u32); 222 + insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2); 223 + insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10); 224 + insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 225 + -8 * (k % 64 + 1)); 226 + } 227 + /* every jump adds 1024 steps to insn_processed, so to stay exactly 228 + * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT 229 + */ 230 + while (i < MAX_TEST_INSNS - 1025) 231 + insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42); 232 + insn[i] = BPF_EXIT_INSN(); 233 + self->prog_len = i + 1; 234 + self->retval = 42; 235 + } 236 + 237 + /* test the sequence of 1k jumps in inner most function (function depth 8)*/ 238 + static void bpf_fill_scale2(struct bpf_test *self) 239 + { 240 + struct bpf_insn *insn = self->fill_insns; 241 + int i = 0, k = 0; 242 + 243 + #define FUNC_NEST 7 244 + for (k = 0; k < FUNC_NEST; k++) { 245 + insn[i++] = BPF_CALL_REL(1); 246 + insn[i++] = BPF_EXIT_INSN(); 247 + } 248 + insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); 249 + /* test to check that the sequence of 1024 jumps is acceptable */ 250 + while (k++ < 1024) { 251 + insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 252 + BPF_FUNC_get_prandom_u32); 253 + insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2); 254 + insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10); 255 + insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 256 + -8 * (k % (64 - 4 * FUNC_NEST) + 1)); 257 + } 258 + /* every jump adds 1024 steps to insn_processed, so to stay exactly 
259 + * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT 260 + */ 261 + while (i < MAX_TEST_INSNS - 1025) 262 + insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42); 263 + insn[i] = BPF_EXIT_INSN(); 264 + self->prog_len = i + 1; 265 + self->retval = 42; 266 + } 267 + 268 + static void bpf_fill_scale(struct bpf_test *self) 269 + { 270 + switch (self->retval) { 271 + case 1: 272 + return bpf_fill_scale1(self); 273 + case 2: 274 + return bpf_fill_scale2(self); 275 + default: 276 + self->prog_len = 0; 277 + break; 278 + } 279 + } 280 + 211 281 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */ 212 282 #define BPF_SK_LOOKUP(func) \ 213 283 /* struct bpf_sock_tuple tuple = {} */ \
+18
tools/testing/selftests/bpf/verifier/scale.c
··· /* Test templates: .insns is empty because .fill_helper (bpf_fill_scale) generates the program at run time; .retval doubles as the filler selector (1 -> bpf_fill_scale1, 2 -> bpf_fill_scale2) and is overwritten by the filler with the expected result */ 1 + { 2 + "scale: scale test 1", 3 + .insns = { }, 4 + .data = { }, 5 + .fill_helper = bpf_fill_scale, 6 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 7 + .result = ACCEPT, 8 + .retval = 1, 9 + }, 10 + { 11 + "scale: scale test 2", 12 + .insns = { }, 13 + .data = { }, 14 + .fill_helper = bpf_fill_scale, 15 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 16 + .result = ACCEPT, 17 + .retval = 2, 18 + },