Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: verifier/value_or_null.c converted to inline assembly

Test verifier/value_or_null.c automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230325025524.144043-40-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Eduard Zingerman and committed by Alexei Starovoitov
d3305286 8f59e87a

+290 -220
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 36 36 #include "verifier_uninit.skel.h" 37 37 #include "verifier_value_adj_spill.skel.h" 38 38 #include "verifier_value.skel.h" 39 + #include "verifier_value_or_null.skel.h" 39 40 40 41 __maybe_unused 41 42 static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) ··· 95 94 void test_verifier_uninit(void) { RUN(verifier_uninit); } 96 95 void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } 97 96 void test_verifier_value(void) { RUN(verifier_value); } 97 + void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); }
+288
tools/testing/selftests/bpf/progs/verifier_value_or_null.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/value_or_null.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + #define MAX_ENTRIES 11 9 + 10 + struct test_val { 11 + unsigned int index; 12 + int foo[MAX_ENTRIES]; 13 + }; 14 + 15 + struct { 16 + __uint(type, BPF_MAP_TYPE_HASH); 17 + __uint(max_entries, 1); 18 + __type(key, long long); 19 + __type(value, struct test_val); 20 + } map_hash_48b SEC(".maps"); 21 + 22 + struct { 23 + __uint(type, BPF_MAP_TYPE_HASH); 24 + __uint(max_entries, 1); 25 + __type(key, long long); 26 + __type(value, long long); 27 + } map_hash_8b SEC(".maps"); 28 + 29 + SEC("tc") 30 + __description("multiple registers share map_lookup_elem result") 31 + __success __retval(0) 32 + __naked void share_map_lookup_elem_result(void) 33 + { 34 + asm volatile (" \ 35 + r1 = 10; \ 36 + *(u64*)(r10 - 8) = r1; \ 37 + r2 = r10; \ 38 + r2 += -8; \ 39 + r1 = %[map_hash_8b] ll; \ 40 + call %[bpf_map_lookup_elem]; \ 41 + r4 = r0; \ 42 + if r0 == 0 goto l0_%=; \ 43 + r1 = 0; \ 44 + *(u64*)(r4 + 0) = r1; \ 45 + l0_%=: exit; \ 46 + " : 47 + : __imm(bpf_map_lookup_elem), 48 + __imm_addr(map_hash_8b) 49 + : __clobber_all); 50 + } 51 + 52 + SEC("tc") 53 + __description("alu ops on ptr_to_map_value_or_null, 1") 54 + __failure __msg("R4 pointer arithmetic on map_value_or_null") 55 + __naked void map_value_or_null_1(void) 56 + { 57 + asm volatile (" \ 58 + r1 = 10; \ 59 + *(u64*)(r10 - 8) = r1; \ 60 + r2 = r10; \ 61 + r2 += -8; \ 62 + r1 = %[map_hash_8b] ll; \ 63 + call %[bpf_map_lookup_elem]; \ 64 + r4 = r0; \ 65 + r4 += -2; \ 66 + r4 += 2; \ 67 + if r0 == 0 goto l0_%=; \ 68 + r1 = 0; \ 69 + *(u64*)(r4 + 0) = r1; \ 70 + l0_%=: exit; \ 71 + " : 72 + : __imm(bpf_map_lookup_elem), 73 + __imm_addr(map_hash_8b) 74 + : __clobber_all); 75 + } 76 + 77 + SEC("tc") 78 + __description("alu ops on ptr_to_map_value_or_null, 2") 79 + __failure __msg("R4 pointer arithmetic on 
map_value_or_null") 80 + __naked void map_value_or_null_2(void) 81 + { 82 + asm volatile (" \ 83 + r1 = 10; \ 84 + *(u64*)(r10 - 8) = r1; \ 85 + r2 = r10; \ 86 + r2 += -8; \ 87 + r1 = %[map_hash_8b] ll; \ 88 + call %[bpf_map_lookup_elem]; \ 89 + r4 = r0; \ 90 + r4 &= -1; \ 91 + if r0 == 0 goto l0_%=; \ 92 + r1 = 0; \ 93 + *(u64*)(r4 + 0) = r1; \ 94 + l0_%=: exit; \ 95 + " : 96 + : __imm(bpf_map_lookup_elem), 97 + __imm_addr(map_hash_8b) 98 + : __clobber_all); 99 + } 100 + 101 + SEC("tc") 102 + __description("alu ops on ptr_to_map_value_or_null, 3") 103 + __failure __msg("R4 pointer arithmetic on map_value_or_null") 104 + __naked void map_value_or_null_3(void) 105 + { 106 + asm volatile (" \ 107 + r1 = 10; \ 108 + *(u64*)(r10 - 8) = r1; \ 109 + r2 = r10; \ 110 + r2 += -8; \ 111 + r1 = %[map_hash_8b] ll; \ 112 + call %[bpf_map_lookup_elem]; \ 113 + r4 = r0; \ 114 + r4 <<= 1; \ 115 + if r0 == 0 goto l0_%=; \ 116 + r1 = 0; \ 117 + *(u64*)(r4 + 0) = r1; \ 118 + l0_%=: exit; \ 119 + " : 120 + : __imm(bpf_map_lookup_elem), 121 + __imm_addr(map_hash_8b) 122 + : __clobber_all); 123 + } 124 + 125 + SEC("tc") 126 + __description("invalid memory access with multiple map_lookup_elem calls") 127 + __failure __msg("R4 !read_ok") 128 + __naked void multiple_map_lookup_elem_calls(void) 129 + { 130 + asm volatile (" \ 131 + r1 = 10; \ 132 + *(u64*)(r10 - 8) = r1; \ 133 + r2 = r10; \ 134 + r2 += -8; \ 135 + r1 = %[map_hash_8b] ll; \ 136 + r8 = r1; \ 137 + r7 = r2; \ 138 + call %[bpf_map_lookup_elem]; \ 139 + r4 = r0; \ 140 + r1 = r8; \ 141 + r2 = r7; \ 142 + call %[bpf_map_lookup_elem]; \ 143 + if r0 == 0 goto l0_%=; \ 144 + r1 = 0; \ 145 + *(u64*)(r4 + 0) = r1; \ 146 + l0_%=: exit; \ 147 + " : 148 + : __imm(bpf_map_lookup_elem), 149 + __imm_addr(map_hash_8b) 150 + : __clobber_all); 151 + } 152 + 153 + SEC("tc") 154 + __description("valid indirect map_lookup_elem access with 2nd lookup in branch") 155 + __success __retval(0) 156 + __naked void with_2nd_lookup_in_branch(void) 157 + { 
158 + asm volatile (" \ 159 + r1 = 10; \ 160 + *(u64*)(r10 - 8) = r1; \ 161 + r2 = r10; \ 162 + r2 += -8; \ 163 + r1 = %[map_hash_8b] ll; \ 164 + r8 = r1; \ 165 + r7 = r2; \ 166 + call %[bpf_map_lookup_elem]; \ 167 + r2 = 10; \ 168 + if r2 != 0 goto l0_%=; \ 169 + r1 = r8; \ 170 + r2 = r7; \ 171 + call %[bpf_map_lookup_elem]; \ 172 + l0_%=: r4 = r0; \ 173 + if r0 == 0 goto l1_%=; \ 174 + r1 = 0; \ 175 + *(u64*)(r4 + 0) = r1; \ 176 + l1_%=: exit; \ 177 + " : 178 + : __imm(bpf_map_lookup_elem), 179 + __imm_addr(map_hash_8b) 180 + : __clobber_all); 181 + } 182 + 183 + SEC("socket") 184 + __description("invalid map access from else condition") 185 + __failure __msg("R0 unbounded memory access") 186 + __failure_unpriv __msg_unpriv("R0 leaks addr") 187 + __flag(BPF_F_ANY_ALIGNMENT) 188 + __naked void map_access_from_else_condition(void) 189 + { 190 + asm volatile (" \ 191 + r1 = 0; \ 192 + *(u64*)(r10 - 8) = r1; \ 193 + r2 = r10; \ 194 + r2 += -8; \ 195 + r1 = %[map_hash_48b] ll; \ 196 + call %[bpf_map_lookup_elem]; \ 197 + if r0 == 0 goto l0_%=; \ 198 + r1 = *(u32*)(r0 + 0); \ 199 + if r1 >= %[__imm_0] goto l1_%=; \ 200 + r1 += 1; \ 201 + l1_%=: r1 <<= 2; \ 202 + r0 += r1; \ 203 + r1 = %[test_val_foo]; \ 204 + *(u64*)(r0 + 0) = r1; \ 205 + l0_%=: exit; \ 206 + " : 207 + : __imm(bpf_map_lookup_elem), 208 + __imm_addr(map_hash_48b), 209 + __imm_const(__imm_0, MAX_ENTRIES-1), 210 + __imm_const(test_val_foo, offsetof(struct test_val, foo)) 211 + : __clobber_all); 212 + } 213 + 214 + SEC("tc") 215 + __description("map lookup and null branch prediction") 216 + __success __retval(0) 217 + __naked void lookup_and_null_branch_prediction(void) 218 + { 219 + asm volatile (" \ 220 + r1 = 10; \ 221 + *(u64*)(r10 - 8) = r1; \ 222 + r2 = r10; \ 223 + r2 += -8; \ 224 + r1 = %[map_hash_8b] ll; \ 225 + call %[bpf_map_lookup_elem]; \ 226 + r6 = r0; \ 227 + if r6 == 0 goto l0_%=; \ 228 + if r6 != 0 goto l0_%=; \ 229 + r10 += 10; \ 230 + l0_%=: exit; \ 231 + " : 232 + : 
__imm(bpf_map_lookup_elem), 233 + __imm_addr(map_hash_8b) 234 + : __clobber_all); 235 + } 236 + 237 + SEC("cgroup/skb") 238 + __description("MAP_VALUE_OR_NULL check_ids() in regsafe()") 239 + __failure __msg("R8 invalid mem access 'map_value_or_null'") 240 + __failure_unpriv __msg_unpriv("") 241 + __flag(BPF_F_TEST_STATE_FREQ) 242 + __naked void null_check_ids_in_regsafe(void) 243 + { 244 + asm volatile (" \ 245 + r1 = 0; \ 246 + *(u64*)(r10 - 8) = r1; \ 247 + /* r9 = map_lookup_elem(...) */ \ 248 + r2 = r10; \ 249 + r2 += -8; \ 250 + r1 = %[map_hash_8b] ll; \ 251 + call %[bpf_map_lookup_elem]; \ 252 + r9 = r0; \ 253 + /* r8 = map_lookup_elem(...) */ \ 254 + r2 = r10; \ 255 + r2 += -8; \ 256 + r1 = %[map_hash_8b] ll; \ 257 + call %[bpf_map_lookup_elem]; \ 258 + r8 = r0; \ 259 + /* r7 = ktime_get_ns() */ \ 260 + call %[bpf_ktime_get_ns]; \ 261 + r7 = r0; \ 262 + /* r6 = ktime_get_ns() */ \ 263 + call %[bpf_ktime_get_ns]; \ 264 + r6 = r0; \ 265 + /* if r6 > r7 goto +1 ; no new information about the state is derived from\ 266 + * ; this check, thus produced verifier states differ\ 267 + * ; only in 'insn_idx' \ 268 + * r9 = r8 ; optionally share ID between r9 and r8\ 269 + */ \ 270 + if r6 > r7 goto l0_%=; \ 271 + r9 = r8; \ 272 + l0_%=: /* if r9 == 0 goto <exit> */ \ 273 + if r9 == 0 goto l1_%=; \ 274 + /* read map value via r8, this is not always \ 275 + * safe because r8 might be not equal to r9. \ 276 + */ \ 277 + r0 = *(u64*)(r8 + 0); \ 278 + l1_%=: /* exit 0 */ \ 279 + r0 = 0; \ 280 + exit; \ 281 + " : 282 + : __imm(bpf_ktime_get_ns), 283 + __imm(bpf_map_lookup_elem), 284 + __imm_addr(map_hash_8b) 285 + : __clobber_all); 286 + } 287 + 288 + char _license[] SEC("license") = "GPL";
-220
tools/testing/selftests/bpf/verifier/value_or_null.c
··· 1 - { 2 - "multiple registers share map_lookup_elem result", 3 - .insns = { 4 - BPF_MOV64_IMM(BPF_REG_1, 10), 5 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 6 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 7 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 8 - BPF_LD_MAP_FD(BPF_REG_1, 0), 9 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 10 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 11 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 12 - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 13 - BPF_EXIT_INSN(), 14 - }, 15 - .fixup_map_hash_8b = { 4 }, 16 - .result = ACCEPT, 17 - .prog_type = BPF_PROG_TYPE_SCHED_CLS 18 - }, 19 - { 20 - "alu ops on ptr_to_map_value_or_null, 1", 21 - .insns = { 22 - BPF_MOV64_IMM(BPF_REG_1, 10), 23 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 24 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 25 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 26 - BPF_LD_MAP_FD(BPF_REG_1, 0), 27 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 28 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 29 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2), 30 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), 31 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 32 - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 33 - BPF_EXIT_INSN(), 34 - }, 35 - .fixup_map_hash_8b = { 4 }, 36 - .errstr = "R4 pointer arithmetic on map_value_or_null", 37 - .result = REJECT, 38 - .prog_type = BPF_PROG_TYPE_SCHED_CLS 39 - }, 40 - { 41 - "alu ops on ptr_to_map_value_or_null, 2", 42 - .insns = { 43 - BPF_MOV64_IMM(BPF_REG_1, 10), 44 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 45 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 46 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 47 - BPF_LD_MAP_FD(BPF_REG_1, 0), 48 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 49 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 50 - BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1), 51 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 52 - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 53 - BPF_EXIT_INSN(), 54 - }, 55 - .fixup_map_hash_8b = { 4 }, 56 - .errstr = "R4 
pointer arithmetic on map_value_or_null", 57 - .result = REJECT, 58 - .prog_type = BPF_PROG_TYPE_SCHED_CLS 59 - }, 60 - { 61 - "alu ops on ptr_to_map_value_or_null, 3", 62 - .insns = { 63 - BPF_MOV64_IMM(BPF_REG_1, 10), 64 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 65 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 66 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 67 - BPF_LD_MAP_FD(BPF_REG_1, 0), 68 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 69 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 70 - BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1), 71 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 72 - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 73 - BPF_EXIT_INSN(), 74 - }, 75 - .fixup_map_hash_8b = { 4 }, 76 - .errstr = "R4 pointer arithmetic on map_value_or_null", 77 - .result = REJECT, 78 - .prog_type = BPF_PROG_TYPE_SCHED_CLS 79 - }, 80 - { 81 - "invalid memory access with multiple map_lookup_elem calls", 82 - .insns = { 83 - BPF_MOV64_IMM(BPF_REG_1, 10), 84 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 85 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 86 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 87 - BPF_LD_MAP_FD(BPF_REG_1, 0), 88 - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), 89 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 90 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 91 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 92 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 93 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 94 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 95 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 96 - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 97 - BPF_EXIT_INSN(), 98 - }, 99 - .fixup_map_hash_8b = { 4 }, 100 - .result = REJECT, 101 - .errstr = "R4 !read_ok", 102 - .prog_type = BPF_PROG_TYPE_SCHED_CLS 103 - }, 104 - { 105 - "valid indirect map_lookup_elem access with 2nd lookup in branch", 106 - .insns = { 107 - BPF_MOV64_IMM(BPF_REG_1, 10), 108 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 109 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 110 - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 111 - BPF_LD_MAP_FD(BPF_REG_1, 0), 112 - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), 113 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 114 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 115 - BPF_MOV64_IMM(BPF_REG_2, 10), 116 - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3), 117 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 118 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 119 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 120 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 121 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 122 - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 123 - BPF_EXIT_INSN(), 124 - }, 125 - .fixup_map_hash_8b = { 4 }, 126 - .result = ACCEPT, 127 - .prog_type = BPF_PROG_TYPE_SCHED_CLS 128 - }, 129 - { 130 - "invalid map access from else condition", 131 - .insns = { 132 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 133 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 134 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 135 - BPF_LD_MAP_FD(BPF_REG_1, 0), 136 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 137 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 138 - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 139 - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1), 140 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), 141 - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 142 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 143 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 144 - BPF_EXIT_INSN(), 145 - }, 146 - .fixup_map_hash_48b = { 3 }, 147 - .errstr = "R0 unbounded memory access", 148 - .result = REJECT, 149 - .errstr_unpriv = "R0 leaks addr", 150 - .result_unpriv = REJECT, 151 - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 152 - }, 153 - { 154 - "map lookup and null branch prediction", 155 - .insns = { 156 - BPF_MOV64_IMM(BPF_REG_1, 10), 157 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 158 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 159 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 160 - BPF_LD_MAP_FD(BPF_REG_1, 0), 161 - 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 162 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 163 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2), 164 - BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1), 165 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_10, 10), 166 - BPF_EXIT_INSN(), 167 - }, 168 - .fixup_map_hash_8b = { 4 }, 169 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 170 - .result = ACCEPT, 171 - }, 172 - { 173 - "MAP_VALUE_OR_NULL check_ids() in regsafe()", 174 - .insns = { 175 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 176 - /* r9 = map_lookup_elem(...) */ 177 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 178 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 179 - BPF_LD_MAP_FD(BPF_REG_1, 180 - 0), 181 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 182 - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), 183 - /* r8 = map_lookup_elem(...) */ 184 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 185 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 186 - BPF_LD_MAP_FD(BPF_REG_1, 187 - 0), 188 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 189 - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 190 - /* r7 = ktime_get_ns() */ 191 - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), 192 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 193 - /* r6 = ktime_get_ns() */ 194 - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), 195 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 196 - /* if r6 > r7 goto +1 ; no new information about the state is derived from 197 - * ; this check, thus produced verifier states differ 198 - * ; only in 'insn_idx' 199 - * r9 = r8 ; optionally share ID between r9 and r8 200 - */ 201 - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), 202 - BPF_MOV64_REG(BPF_REG_9, BPF_REG_8), 203 - /* if r9 == 0 goto <exit> */ 204 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1), 205 - /* read map value via r8, this is not always 206 - * safe because r8 might be not equal to r9. 
207 - */ 208 - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0), 209 - /* exit 0 */ 210 - BPF_MOV64_IMM(BPF_REG_0, 0), 211 - BPF_EXIT_INSN(), 212 - }, 213 - .flags = BPF_F_TEST_STATE_FREQ, 214 - .fixup_map_hash_8b = { 3, 9 }, 215 - .result = REJECT, 216 - .errstr = "R8 invalid mem access 'map_value_or_null'", 217 - .result_unpriv = REJECT, 218 - .errstr_unpriv = "", 219 - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 220 - },