Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: synthetic tests to push verifier limits

Add a test to generate 1M (1,000,000) ld_imm64 insns to stress the verifier.

Bump the size of fill_ld_abs_vlan_push_pop test from 4k to 29k
and jump_around_ld_abs from 4k to 5.5k.
Larger sizes are not possible due to 16-bit offset encoding
in jump instructions.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>

Authored by Alexei Starovoitov; committed by Daniel Borkmann.
8aa2d4b4 e5e7a8f2

+35 -9
+26 -9
tools/testing/selftests/bpf/test_verifier.c
··· 50 50 #include "../../../include/linux/filter.h" 51 51 52 52 #define MAX_INSNS BPF_MAXINSNS 53 + #define MAX_TEST_INSNS 1000000 53 54 #define MAX_FIXUPS 8 54 55 #define MAX_NR_MAPS 14 55 56 #define MAX_TEST_RUNS 8 ··· 67 66 struct bpf_test { 68 67 const char *descr; 69 68 struct bpf_insn insns[MAX_INSNS]; 69 + struct bpf_insn *fill_insns; 70 70 int fixup_map_hash_8b[MAX_FIXUPS]; 71 71 int fixup_map_hash_48b[MAX_FIXUPS]; 72 72 int fixup_map_hash_16b[MAX_FIXUPS]; ··· 85 83 const char *errstr; 86 84 const char *errstr_unpriv; 87 85 uint32_t retval, retval_unpriv, insn_processed; 86 + int prog_len; 88 87 enum { 89 88 UNDEF, 90 89 ACCEPT, ··· 122 119 123 120 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) 124 121 { 125 - /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */ 122 + /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */ 126 123 #define PUSH_CNT 51 127 - unsigned int len = BPF_MAXINSNS; 128 - struct bpf_insn *insn = self->insns; 124 + /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */ 125 + unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6; 126 + struct bpf_insn *insn = self->fill_insns; 129 127 int i = 0, j, k = 0; 130 128 131 129 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); ··· 160 156 for (; i < len - 1; i++) 161 157 insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef); 162 158 insn[len - 1] = BPF_EXIT_INSN(); 159 + self->prog_len = len; 163 160 } 164 161 165 162 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self) 166 163 { 167 - struct bpf_insn *insn = self->insns; 168 - unsigned int len = BPF_MAXINSNS; 164 + struct bpf_insn *insn = self->fill_insns; 165 + /* jump range is limited to 16 bit. 
every ld_abs is replaced by 6 insns */ 166 + unsigned int len = (1 << 15) / 6; 169 167 int i = 0; 170 168 171 169 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); ··· 177 171 while (i < len - 1) 178 172 insn[i++] = BPF_LD_ABS(BPF_B, 1); 179 173 insn[i] = BPF_EXIT_INSN(); 174 + self->prog_len = i + 1; 180 175 } 181 176 182 177 static void bpf_fill_rand_ld_dw(struct bpf_test *self) 183 178 { 184 - struct bpf_insn *insn = self->insns; 179 + struct bpf_insn *insn = self->fill_insns; 185 180 uint64_t res = 0; 186 181 int i = 0; 187 182 ··· 200 193 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32); 201 194 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1); 202 195 insn[i] = BPF_EXIT_INSN(); 196 + self->prog_len = i + 1; 203 197 res ^= (res >> 32); 204 198 self->retval = (uint32_t)res; 205 199 } ··· 528 520 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage; 529 521 int *fixup_map_spin_lock = test->fixup_map_spin_lock; 530 522 531 - if (test->fill_helper) 523 + if (test->fill_helper) { 524 + test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn)); 532 525 test->fill_helper(test); 526 + } 533 527 534 528 /* Allocating HTs with 1 elem is fine here, since we only test 535 529 * for verifier and not do a runtime lookup, so the only thing ··· 728 718 prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 729 719 fixup_skips = skips; 730 720 do_test_fixup(test, prog_type, prog, map_fds); 721 + if (test->fill_insns) { 722 + prog = test->fill_insns; 723 + prog_len = test->prog_len; 724 + } else { 725 + prog_len = probe_filter_length(prog); 726 + } 731 727 /* If there were some map skips during fixup due to missing bpf 732 728 * features, skip this test. 
733 729 */ 734 730 if (fixup_skips != skips) 735 731 return; 736 - prog_len = probe_filter_length(prog); 737 732 738 733 pflags = 0; 739 734 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT) ··· 746 731 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) 747 732 pflags |= BPF_F_ANY_ALIGNMENT; 748 733 fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags, 749 - "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1); 734 + "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 4); 750 735 if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) { 751 736 printf("SKIP (unsupported program type %d)\n", prog_type); 752 737 skips++; ··· 845 830 goto fail_log; 846 831 } 847 832 close_fds: 833 + if (test->fill_insns) 834 + free(test->fill_insns); 848 835 close(fd_prog); 849 836 for (i = 0; i < MAX_NR_MAPS; i++) 850 837 close(map_fds[i]);
+9
tools/testing/selftests/bpf/verifier/ld_dw.c
··· 34 34 .result = ACCEPT, 35 35 .retval = 5, 36 36 }, 37 + { 38 + "ld_dw: xor semi-random 64 bit imms, test 5", 39 + .insns = { }, 40 + .data = { }, 41 + .fill_helper = bpf_fill_rand_ld_dw, 42 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 43 + .result = ACCEPT, 44 + .retval = 1000000 - 6, 45 + },