Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: verifier/value_ptr_arith converted to inline assembly

Test verifier/value_ptr_arith automatically converted to use inline assembly.

Test cases "sanitation: alu with different scalars 2" and
"sanitation: alu with different scalars 3" are updated to
avoid -ENOENT as return value, as __retval() annotation
only supports numeric literals.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230421174234.2391278-25-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Eduard Zingerman and committed by Alexei Starovoitov.
Commit IDs: 4db10a82 efe25a33

+1451 -1146
+28 -6
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 62 62 #include "verifier_value.skel.h" 63 63 #include "verifier_value_illegal_alu.skel.h" 64 64 #include "verifier_value_or_null.skel.h" 65 + #include "verifier_value_ptr_arith.skel.h" 65 66 #include "verifier_var_off.skel.h" 66 67 #include "verifier_xadd.skel.h" 67 68 #include "verifier_xdp.skel.h" ··· 165 164 void test_verifier_xdp(void) { RUN(verifier_xdp); } 166 165 void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } 167 166 168 - static int init_array_access_maps(struct bpf_object *obj) 167 + static int init_test_val_map(struct bpf_object *obj, char *map_name) 169 168 { 170 - struct bpf_map *array_ro; 171 169 struct test_val value = { 172 170 .index = (6 + 1) * sizeof(int), 173 171 .foo[6] = 0xabcdef12, 174 172 }; 173 + struct bpf_map *map; 175 174 int err, key = 0; 176 175 177 - array_ro = bpf_object__find_map_by_name(obj, "map_array_ro"); 178 - if (!ASSERT_OK_PTR(array_ro, "lookup map_array_ro")) 176 + map = bpf_object__find_map_by_name(obj, map_name); 177 + if (!map) { 178 + PRINT_FAIL("Can't find map '%s'\n", map_name); 179 179 return -EINVAL; 180 + } 180 181 181 - err = bpf_map_update_elem(bpf_map__fd(array_ro), &key, &value, 0); 182 - if (!ASSERT_OK(err, "map_array_ro update")) 182 + err = bpf_map_update_elem(bpf_map__fd(map), &key, &value, 0); 183 + if (err) { 184 + PRINT_FAIL("Error while updating map '%s': %d\n", map_name, err); 183 185 return err; 186 + } 184 187 185 188 return 0; 189 + } 190 + 191 + static int init_array_access_maps(struct bpf_object *obj) 192 + { 193 + return init_test_val_map(obj, "map_array_ro"); 186 194 } 187 195 188 196 void test_verifier_array_access(void) ··· 199 189 run_tests_aux("verifier_array_access", 200 190 verifier_array_access__elf_bytes, 201 191 init_array_access_maps); 192 + } 193 + 194 + static int init_value_ptr_arith_maps(struct bpf_object *obj) 195 + { 196 + return init_test_val_map(obj, "map_array_48b"); 197 + } 198 + 199 + void test_verifier_value_ptr_arith(void) 
200 + { 201 + run_tests_aux("verifier_value_ptr_arith", 202 + verifier_value_ptr_arith__elf_bytes, 203 + init_value_ptr_arith_maps); 202 204 }
+1423
tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/value_ptr_arith.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include <errno.h> 7 + #include "bpf_misc.h" 8 + 9 + #define MAX_ENTRIES 11 10 + 11 + struct test_val { 12 + unsigned int index; 13 + int foo[MAX_ENTRIES]; 14 + }; 15 + 16 + struct { 17 + __uint(type, BPF_MAP_TYPE_ARRAY); 18 + __uint(max_entries, 1); 19 + __type(key, int); 20 + __type(value, struct test_val); 21 + } map_array_48b SEC(".maps"); 22 + 23 + struct other_val { 24 + long long foo; 25 + long long bar; 26 + }; 27 + 28 + struct { 29 + __uint(type, BPF_MAP_TYPE_HASH); 30 + __uint(max_entries, 1); 31 + __type(key, long long); 32 + __type(value, struct other_val); 33 + } map_hash_16b SEC(".maps"); 34 + 35 + struct { 36 + __uint(type, BPF_MAP_TYPE_HASH); 37 + __uint(max_entries, 1); 38 + __type(key, long long); 39 + __type(value, struct test_val); 40 + } map_hash_48b SEC(".maps"); 41 + 42 + SEC("socket") 43 + __description("map access: known scalar += value_ptr unknown vs const") 44 + __success __failure_unpriv 45 + __msg_unpriv("R1 tried to add from different maps, paths or scalars") 46 + __retval(1) 47 + __naked void value_ptr_unknown_vs_const(void) 48 + { 49 + asm volatile (" \ 50 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 51 + r1 = 0; \ 52 + *(u64*)(r10 - 8) = r1; \ 53 + r2 = r10; \ 54 + r2 += -8; \ 55 + if r0 == 1 goto l0_%=; \ 56 + r1 = %[map_hash_16b] ll; \ 57 + if r0 != 1 goto l1_%=; \ 58 + l0_%=: r1 = %[map_array_48b] ll; \ 59 + l1_%=: call %[bpf_map_lookup_elem]; \ 60 + if r0 == 0 goto l2_%=; \ 61 + r4 = *(u8*)(r0 + 0); \ 62 + if r4 == 1 goto l3_%=; \ 63 + r1 = 6; \ 64 + r1 = -r1; \ 65 + r1 &= 0x7; \ 66 + goto l4_%=; \ 67 + l3_%=: r1 = 3; \ 68 + l4_%=: r1 += r0; \ 69 + r0 = *(u8*)(r1 + 0); \ 70 + l2_%=: r0 = 1; \ 71 + exit; \ 72 + " : 73 + : __imm(bpf_map_lookup_elem), 74 + __imm_addr(map_array_48b), 75 + __imm_addr(map_hash_16b), 76 + __imm_const(__sk_buff_len, 
offsetof(struct __sk_buff, len)) 77 + : __clobber_all); 78 + } 79 + 80 + SEC("socket") 81 + __description("map access: known scalar += value_ptr const vs unknown") 82 + __success __failure_unpriv 83 + __msg_unpriv("R1 tried to add from different maps, paths or scalars") 84 + __retval(1) 85 + __naked void value_ptr_const_vs_unknown(void) 86 + { 87 + asm volatile (" \ 88 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 89 + r1 = 0; \ 90 + *(u64*)(r10 - 8) = r1; \ 91 + r2 = r10; \ 92 + r2 += -8; \ 93 + if r0 == 1 goto l0_%=; \ 94 + r1 = %[map_hash_16b] ll; \ 95 + if r0 != 1 goto l1_%=; \ 96 + l0_%=: r1 = %[map_array_48b] ll; \ 97 + l1_%=: call %[bpf_map_lookup_elem]; \ 98 + if r0 == 0 goto l2_%=; \ 99 + r4 = *(u8*)(r0 + 0); \ 100 + if r4 == 1 goto l3_%=; \ 101 + r1 = 3; \ 102 + goto l4_%=; \ 103 + l3_%=: r1 = 6; \ 104 + r1 = -r1; \ 105 + r1 &= 0x7; \ 106 + l4_%=: r1 += r0; \ 107 + r0 = *(u8*)(r1 + 0); \ 108 + l2_%=: r0 = 1; \ 109 + exit; \ 110 + " : 111 + : __imm(bpf_map_lookup_elem), 112 + __imm_addr(map_array_48b), 113 + __imm_addr(map_hash_16b), 114 + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) 115 + : __clobber_all); 116 + } 117 + 118 + SEC("socket") 119 + __description("map access: known scalar += value_ptr const vs const (ne)") 120 + __success __failure_unpriv 121 + __msg_unpriv("R1 tried to add from different maps, paths or scalars") 122 + __retval(1) 123 + __naked void ptr_const_vs_const_ne(void) 124 + { 125 + asm volatile (" \ 126 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 127 + r1 = 0; \ 128 + *(u64*)(r10 - 8) = r1; \ 129 + r2 = r10; \ 130 + r2 += -8; \ 131 + if r0 == 1 goto l0_%=; \ 132 + r1 = %[map_hash_16b] ll; \ 133 + if r0 != 1 goto l1_%=; \ 134 + l0_%=: r1 = %[map_array_48b] ll; \ 135 + l1_%=: call %[bpf_map_lookup_elem]; \ 136 + if r0 == 0 goto l2_%=; \ 137 + r4 = *(u8*)(r0 + 0); \ 138 + if r4 == 1 goto l3_%=; \ 139 + r1 = 3; \ 140 + goto l4_%=; \ 141 + l3_%=: r1 = 5; \ 142 + l4_%=: r1 += r0; \ 143 + r0 = *(u8*)(r1 + 0); \ 144 + l2_%=: r0 = 1; \ 
145 + exit; \ 146 + " : 147 + : __imm(bpf_map_lookup_elem), 148 + __imm_addr(map_array_48b), 149 + __imm_addr(map_hash_16b), 150 + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) 151 + : __clobber_all); 152 + } 153 + 154 + SEC("socket") 155 + __description("map access: known scalar += value_ptr const vs const (eq)") 156 + __success __success_unpriv __retval(1) 157 + __naked void ptr_const_vs_const_eq(void) 158 + { 159 + asm volatile (" \ 160 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 161 + r1 = 0; \ 162 + *(u64*)(r10 - 8) = r1; \ 163 + r2 = r10; \ 164 + r2 += -8; \ 165 + if r0 == 1 goto l0_%=; \ 166 + r1 = %[map_hash_16b] ll; \ 167 + if r0 != 1 goto l1_%=; \ 168 + l0_%=: r1 = %[map_array_48b] ll; \ 169 + l1_%=: call %[bpf_map_lookup_elem]; \ 170 + if r0 == 0 goto l2_%=; \ 171 + r4 = *(u8*)(r0 + 0); \ 172 + if r4 == 1 goto l3_%=; \ 173 + r1 = 5; \ 174 + goto l4_%=; \ 175 + l3_%=: r1 = 5; \ 176 + l4_%=: r1 += r0; \ 177 + r0 = *(u8*)(r1 + 0); \ 178 + l2_%=: r0 = 1; \ 179 + exit; \ 180 + " : 181 + : __imm(bpf_map_lookup_elem), 182 + __imm_addr(map_array_48b), 183 + __imm_addr(map_hash_16b), 184 + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) 185 + : __clobber_all); 186 + } 187 + 188 + SEC("socket") 189 + __description("map access: known scalar += value_ptr unknown vs unknown (eq)") 190 + __success __success_unpriv __retval(1) 191 + __naked void ptr_unknown_vs_unknown_eq(void) 192 + { 193 + asm volatile (" \ 194 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 195 + r1 = 0; \ 196 + *(u64*)(r10 - 8) = r1; \ 197 + r2 = r10; \ 198 + r2 += -8; \ 199 + if r0 == 1 goto l0_%=; \ 200 + r1 = %[map_hash_16b] ll; \ 201 + if r0 != 1 goto l1_%=; \ 202 + l0_%=: r1 = %[map_array_48b] ll; \ 203 + l1_%=: call %[bpf_map_lookup_elem]; \ 204 + if r0 == 0 goto l2_%=; \ 205 + r4 = *(u8*)(r0 + 0); \ 206 + if r4 == 1 goto l3_%=; \ 207 + r1 = 6; \ 208 + r1 = -r1; \ 209 + r1 &= 0x7; \ 210 + goto l4_%=; \ 211 + l3_%=: r1 = 6; \ 212 + r1 = -r1; \ 213 + r1 &= 0x7; \ 214 + l4_%=: r1 
+= r0; \ 215 + r0 = *(u8*)(r1 + 0); \ 216 + l2_%=: r0 = 1; \ 217 + exit; \ 218 + " : 219 + : __imm(bpf_map_lookup_elem), 220 + __imm_addr(map_array_48b), 221 + __imm_addr(map_hash_16b), 222 + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) 223 + : __clobber_all); 224 + } 225 + 226 + SEC("socket") 227 + __description("map access: known scalar += value_ptr unknown vs unknown (lt)") 228 + __success __failure_unpriv 229 + __msg_unpriv("R1 tried to add from different maps, paths or scalars") 230 + __retval(1) 231 + __naked void ptr_unknown_vs_unknown_lt(void) 232 + { 233 + asm volatile (" \ 234 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 235 + r1 = 0; \ 236 + *(u64*)(r10 - 8) = r1; \ 237 + r2 = r10; \ 238 + r2 += -8; \ 239 + if r0 == 1 goto l0_%=; \ 240 + r1 = %[map_hash_16b] ll; \ 241 + if r0 != 1 goto l1_%=; \ 242 + l0_%=: r1 = %[map_array_48b] ll; \ 243 + l1_%=: call %[bpf_map_lookup_elem]; \ 244 + if r0 == 0 goto l2_%=; \ 245 + r4 = *(u8*)(r0 + 0); \ 246 + if r4 == 1 goto l3_%=; \ 247 + r1 = 6; \ 248 + r1 = -r1; \ 249 + r1 &= 0x3; \ 250 + goto l4_%=; \ 251 + l3_%=: r1 = 6; \ 252 + r1 = -r1; \ 253 + r1 &= 0x7; \ 254 + l4_%=: r1 += r0; \ 255 + r0 = *(u8*)(r1 + 0); \ 256 + l2_%=: r0 = 1; \ 257 + exit; \ 258 + " : 259 + : __imm(bpf_map_lookup_elem), 260 + __imm_addr(map_array_48b), 261 + __imm_addr(map_hash_16b), 262 + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) 263 + : __clobber_all); 264 + } 265 + 266 + SEC("socket") 267 + __description("map access: known scalar += value_ptr unknown vs unknown (gt)") 268 + __success __failure_unpriv 269 + __msg_unpriv("R1 tried to add from different maps, paths or scalars") 270 + __retval(1) 271 + __naked void ptr_unknown_vs_unknown_gt(void) 272 + { 273 + asm volatile (" \ 274 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 275 + r1 = 0; \ 276 + *(u64*)(r10 - 8) = r1; \ 277 + r2 = r10; \ 278 + r2 += -8; \ 279 + if r0 == 1 goto l0_%=; \ 280 + r1 = %[map_hash_16b] ll; \ 281 + if r0 != 1 goto l1_%=; \ 282 + l0_%=: r1 = 
%[map_array_48b] ll; \ 283 + l1_%=: call %[bpf_map_lookup_elem]; \ 284 + if r0 == 0 goto l2_%=; \ 285 + r4 = *(u8*)(r0 + 0); \ 286 + if r4 == 1 goto l3_%=; \ 287 + r1 = 6; \ 288 + r1 = -r1; \ 289 + r1 &= 0x7; \ 290 + goto l4_%=; \ 291 + l3_%=: r1 = 6; \ 292 + r1 = -r1; \ 293 + r1 &= 0x3; \ 294 + l4_%=: r1 += r0; \ 295 + r0 = *(u8*)(r1 + 0); \ 296 + l2_%=: r0 = 1; \ 297 + exit; \ 298 + " : 299 + : __imm(bpf_map_lookup_elem), 300 + __imm_addr(map_array_48b), 301 + __imm_addr(map_hash_16b), 302 + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) 303 + : __clobber_all); 304 + } 305 + 306 + SEC("socket") 307 + __description("map access: known scalar += value_ptr from different maps") 308 + __success __success_unpriv __retval(1) 309 + __naked void value_ptr_from_different_maps(void) 310 + { 311 + asm volatile (" \ 312 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 313 + r1 = 0; \ 314 + *(u64*)(r10 - 8) = r1; \ 315 + r2 = r10; \ 316 + r2 += -8; \ 317 + if r0 == 1 goto l0_%=; \ 318 + r1 = %[map_hash_16b] ll; \ 319 + if r0 != 1 goto l1_%=; \ 320 + l0_%=: r1 = %[map_array_48b] ll; \ 321 + l1_%=: call %[bpf_map_lookup_elem]; \ 322 + if r0 == 0 goto l2_%=; \ 323 + r1 = 4; \ 324 + r1 += r0; \ 325 + r0 = *(u8*)(r1 + 0); \ 326 + l2_%=: r0 = 1; \ 327 + exit; \ 328 + " : 329 + : __imm(bpf_map_lookup_elem), 330 + __imm_addr(map_array_48b), 331 + __imm_addr(map_hash_16b), 332 + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) 333 + : __clobber_all); 334 + } 335 + 336 + SEC("socket") 337 + __description("map access: value_ptr -= known scalar from different maps") 338 + __success __failure_unpriv 339 + __msg_unpriv("R0 min value is outside of the allowed memory range") 340 + __retval(1) 341 + __naked void known_scalar_from_different_maps(void) 342 + { 343 + asm volatile (" \ 344 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 345 + r1 = 0; \ 346 + *(u64*)(r10 - 8) = r1; \ 347 + r2 = r10; \ 348 + r2 += -8; \ 349 + if r0 == 1 goto l0_%=; \ 350 + r1 = %[map_hash_16b] ll; \ 351 + 
if r0 != 1 goto l1_%=; \ 352 + l0_%=: r1 = %[map_array_48b] ll; \ 353 + l1_%=: call %[bpf_map_lookup_elem]; \ 354 + if r0 == 0 goto l2_%=; \ 355 + r1 = 4; \ 356 + r0 -= r1; \ 357 + r0 += r1; \ 358 + r0 = *(u8*)(r0 + 0); \ 359 + l2_%=: r0 = 1; \ 360 + exit; \ 361 + " : 362 + : __imm(bpf_map_lookup_elem), 363 + __imm_addr(map_array_48b), 364 + __imm_addr(map_hash_16b), 365 + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) 366 + : __clobber_all); 367 + } 368 + 369 + SEC("socket") 370 + __description("map access: known scalar += value_ptr from different maps, but same value properties") 371 + __success __success_unpriv __retval(1) 372 + __naked void maps_but_same_value_properties(void) 373 + { 374 + asm volatile (" \ 375 + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ 376 + r1 = 0; \ 377 + *(u64*)(r10 - 8) = r1; \ 378 + r2 = r10; \ 379 + r2 += -8; \ 380 + if r0 == 1 goto l0_%=; \ 381 + r1 = %[map_hash_48b] ll; \ 382 + if r0 != 1 goto l1_%=; \ 383 + l0_%=: r1 = %[map_array_48b] ll; \ 384 + l1_%=: call %[bpf_map_lookup_elem]; \ 385 + if r0 == 0 goto l2_%=; \ 386 + r1 = 4; \ 387 + r1 += r0; \ 388 + r0 = *(u8*)(r1 + 0); \ 389 + l2_%=: r0 = 1; \ 390 + exit; \ 391 + " : 392 + : __imm(bpf_map_lookup_elem), 393 + __imm_addr(map_array_48b), 394 + __imm_addr(map_hash_48b), 395 + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) 396 + : __clobber_all); 397 + } 398 + 399 + SEC("socket") 400 + __description("map access: mixing value pointer and scalar, 1") 401 + __success __failure_unpriv __msg_unpriv("R2 pointer comparison prohibited") 402 + __retval(0) 403 + __naked void value_pointer_and_scalar_1(void) 404 + { 405 + asm volatile (" \ 406 + /* load map value pointer into r0 and r2 */ \ 407 + r0 = 1; \ 408 + r1 = %[map_array_48b] ll; \ 409 + r2 = r10; \ 410 + r2 += -16; \ 411 + r6 = 0; \ 412 + *(u64*)(r10 - 16) = r6; \ 413 + call %[bpf_map_lookup_elem]; \ 414 + if r0 != 0 goto l0_%=; \ 415 + exit; \ 416 + l0_%=: /* load some number from the map into r1 */ \ 417 + r1 
= *(u8*)(r0 + 0); \ 418 + /* depending on r1, branch: */ \ 419 + if r1 != 0 goto l1_%=; \ 420 + /* branch A */ \ 421 + r2 = r0; \ 422 + r3 = 0; \ 423 + goto l2_%=; \ 424 + l1_%=: /* branch B */ \ 425 + r2 = 0; \ 426 + r3 = 0x100000; \ 427 + l2_%=: /* common instruction */ \ 428 + r2 += r3; \ 429 + /* depending on r1, branch: */ \ 430 + if r1 != 0 goto l3_%=; \ 431 + /* branch A */ \ 432 + goto l4_%=; \ 433 + l3_%=: /* branch B */ \ 434 + r0 = 0x13371337; \ 435 + /* verifier follows fall-through */ \ 436 + if r2 != 0x100000 goto l4_%=; \ 437 + r0 = 0; \ 438 + exit; \ 439 + l4_%=: /* fake-dead code; targeted from branch A to \ 440 + * prevent dead code sanitization \ 441 + */ \ 442 + r0 = *(u8*)(r0 + 0); \ 443 + r0 = 0; \ 444 + exit; \ 445 + " : 446 + : __imm(bpf_map_lookup_elem), 447 + __imm_addr(map_array_48b) 448 + : __clobber_all); 449 + } 450 + 451 + SEC("socket") 452 + __description("map access: mixing value pointer and scalar, 2") 453 + __success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") 454 + __retval(0) 455 + __naked void value_pointer_and_scalar_2(void) 456 + { 457 + asm volatile (" \ 458 + /* load map value pointer into r0 and r2 */ \ 459 + r0 = 1; \ 460 + r1 = %[map_array_48b] ll; \ 461 + r2 = r10; \ 462 + r2 += -16; \ 463 + r6 = 0; \ 464 + *(u64*)(r10 - 16) = r6; \ 465 + call %[bpf_map_lookup_elem]; \ 466 + if r0 != 0 goto l0_%=; \ 467 + exit; \ 468 + l0_%=: /* load some number from the map into r1 */ \ 469 + r1 = *(u8*)(r0 + 0); \ 470 + /* depending on r1, branch: */ \ 471 + if r1 == 0 goto l1_%=; \ 472 + /* branch A */ \ 473 + r2 = 0; \ 474 + r3 = 0x100000; \ 475 + goto l2_%=; \ 476 + l1_%=: /* branch B */ \ 477 + r2 = r0; \ 478 + r3 = 0; \ 479 + l2_%=: /* common instruction */ \ 480 + r2 += r3; \ 481 + /* depending on r1, branch: */ \ 482 + if r1 != 0 goto l3_%=; \ 483 + /* branch A */ \ 484 + goto l4_%=; \ 485 + l3_%=: /* branch B */ \ 486 + r0 = 0x13371337; \ 487 + /* verifier follows fall-through */ \ 488 + if r2 != 0x100000 
goto l4_%=; \ 489 + r0 = 0; \ 490 + exit; \ 491 + l4_%=: /* fake-dead code; targeted from branch A to \ 492 + * prevent dead code sanitization, rejected \ 493 + * via branch B however \ 494 + */ \ 495 + r0 = *(u8*)(r0 + 0); \ 496 + r0 = 0; \ 497 + exit; \ 498 + " : 499 + : __imm(bpf_map_lookup_elem), 500 + __imm_addr(map_array_48b) 501 + : __clobber_all); 502 + } 503 + 504 + SEC("socket") 505 + __description("sanitation: alu with different scalars 1") 506 + __success __success_unpriv __retval(0x100000) 507 + __naked void alu_with_different_scalars_1(void) 508 + { 509 + asm volatile (" \ 510 + r0 = 1; \ 511 + r1 = %[map_array_48b] ll; \ 512 + r2 = r10; \ 513 + r2 += -16; \ 514 + r6 = 0; \ 515 + *(u64*)(r10 - 16) = r6; \ 516 + call %[bpf_map_lookup_elem]; \ 517 + if r0 != 0 goto l0_%=; \ 518 + exit; \ 519 + l0_%=: r1 = *(u32*)(r0 + 0); \ 520 + if r1 == 0 goto l1_%=; \ 521 + r2 = 0; \ 522 + r3 = 0x100000; \ 523 + goto l2_%=; \ 524 + l1_%=: r2 = 42; \ 525 + r3 = 0x100001; \ 526 + l2_%=: r2 += r3; \ 527 + r0 = r2; \ 528 + exit; \ 529 + " : 530 + : __imm(bpf_map_lookup_elem), 531 + __imm_addr(map_array_48b) 532 + : __clobber_all); 533 + } 534 + 535 + SEC("socket") 536 + __description("sanitation: alu with different scalars 2") 537 + __success __success_unpriv __retval(0) 538 + __naked void alu_with_different_scalars_2(void) 539 + { 540 + asm volatile (" \ 541 + r0 = 1; \ 542 + r1 = %[map_array_48b] ll; \ 543 + r6 = r1; \ 544 + r2 = r10; \ 545 + r2 += -16; \ 546 + r7 = 0; \ 547 + *(u64*)(r10 - 16) = r7; \ 548 + call %[bpf_map_delete_elem]; \ 549 + r7 = r0; \ 550 + r1 = r6; \ 551 + r2 = r10; \ 552 + r2 += -16; \ 553 + call %[bpf_map_delete_elem]; \ 554 + r6 = r0; \ 555 + r8 = r6; \ 556 + r8 += r7; \ 557 + r0 = r8; \ 558 + r0 += %[einval]; \ 559 + r0 += %[einval]; \ 560 + exit; \ 561 + " : 562 + : __imm(bpf_map_delete_elem), 563 + __imm_addr(map_array_48b), 564 + __imm_const(einval, EINVAL) 565 + : __clobber_all); 566 + } 567 + 568 + SEC("socket") 569 + 
__description("sanitation: alu with different scalars 3") 570 + __success __success_unpriv __retval(0) 571 + __naked void alu_with_different_scalars_3(void) 572 + { 573 + asm volatile (" \ 574 + r0 = %[einval]; \ 575 + r0 *= -1; \ 576 + r7 = r0; \ 577 + r0 = %[einval]; \ 578 + r0 *= -1; \ 579 + r6 = r0; \ 580 + r8 = r6; \ 581 + r8 += r7; \ 582 + r0 = r8; \ 583 + r0 += %[einval]; \ 584 + r0 += %[einval]; \ 585 + exit; \ 586 + " : 587 + : __imm_const(einval, EINVAL) 588 + : __clobber_all); 589 + } 590 + 591 + SEC("socket") 592 + __description("map access: value_ptr += known scalar, upper oob arith, test 1") 593 + __success __failure_unpriv 594 + __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 595 + __retval(1) 596 + __naked void upper_oob_arith_test_1(void) 597 + { 598 + asm volatile (" \ 599 + r1 = 0; \ 600 + *(u64*)(r10 - 8) = r1; \ 601 + r2 = r10; \ 602 + r2 += -8; \ 603 + r1 = %[map_array_48b] ll; \ 604 + call %[bpf_map_lookup_elem]; \ 605 + if r0 == 0 goto l0_%=; \ 606 + r1 = 48; \ 607 + r0 += r1; \ 608 + r0 -= r1; \ 609 + r0 = *(u8*)(r0 + 0); \ 610 + l0_%=: r0 = 1; \ 611 + exit; \ 612 + " : 613 + : __imm(bpf_map_lookup_elem), 614 + __imm_addr(map_array_48b) 615 + : __clobber_all); 616 + } 617 + 618 + SEC("socket") 619 + __description("map access: value_ptr += known scalar, upper oob arith, test 2") 620 + __success __failure_unpriv 621 + __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 622 + __retval(1) 623 + __naked void upper_oob_arith_test_2(void) 624 + { 625 + asm volatile (" \ 626 + r1 = 0; \ 627 + *(u64*)(r10 - 8) = r1; \ 628 + r2 = r10; \ 629 + r2 += -8; \ 630 + r1 = %[map_array_48b] ll; \ 631 + call %[bpf_map_lookup_elem]; \ 632 + if r0 == 0 goto l0_%=; \ 633 + r1 = 49; \ 634 + r0 += r1; \ 635 + r0 -= r1; \ 636 + r0 = *(u8*)(r0 + 0); \ 637 + l0_%=: r0 = 1; \ 638 + exit; \ 639 + " : 640 + : __imm(bpf_map_lookup_elem), 641 + __imm_addr(map_array_48b) 642 + : __clobber_all); 643 + } 644 + 645 + SEC("socket") 646 + 
__description("map access: value_ptr += known scalar, upper oob arith, test 3") 647 + __success __success_unpriv __retval(1) 648 + __naked void upper_oob_arith_test_3(void) 649 + { 650 + asm volatile (" \ 651 + r1 = 0; \ 652 + *(u64*)(r10 - 8) = r1; \ 653 + r2 = r10; \ 654 + r2 += -8; \ 655 + r1 = %[map_array_48b] ll; \ 656 + call %[bpf_map_lookup_elem]; \ 657 + if r0 == 0 goto l0_%=; \ 658 + r1 = 47; \ 659 + r0 += r1; \ 660 + r0 -= r1; \ 661 + r0 = *(u8*)(r0 + 0); \ 662 + l0_%=: r0 = 1; \ 663 + exit; \ 664 + " : 665 + : __imm(bpf_map_lookup_elem), 666 + __imm_addr(map_array_48b) 667 + : __clobber_all); 668 + } 669 + 670 + SEC("socket") 671 + __description("map access: value_ptr -= known scalar, lower oob arith, test 1") 672 + __failure __msg("R0 min value is outside of the allowed memory range") 673 + __failure_unpriv 674 + __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 675 + __naked void lower_oob_arith_test_1(void) 676 + { 677 + asm volatile (" \ 678 + r1 = 0; \ 679 + *(u64*)(r10 - 8) = r1; \ 680 + r2 = r10; \ 681 + r2 += -8; \ 682 + r1 = %[map_array_48b] ll; \ 683 + call %[bpf_map_lookup_elem]; \ 684 + if r0 == 0 goto l0_%=; \ 685 + r1 = 47; \ 686 + r0 += r1; \ 687 + r1 = 48; \ 688 + r0 -= r1; \ 689 + r0 = *(u8*)(r0 + 0); \ 690 + l0_%=: r0 = 1; \ 691 + exit; \ 692 + " : 693 + : __imm(bpf_map_lookup_elem), 694 + __imm_addr(map_array_48b) 695 + : __clobber_all); 696 + } 697 + 698 + SEC("socket") 699 + __description("map access: value_ptr -= known scalar, lower oob arith, test 2") 700 + __success __failure_unpriv 701 + __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 702 + __retval(1) 703 + __naked void lower_oob_arith_test_2(void) 704 + { 705 + asm volatile (" \ 706 + r1 = 0; \ 707 + *(u64*)(r10 - 8) = r1; \ 708 + r2 = r10; \ 709 + r2 += -8; \ 710 + r1 = %[map_array_48b] ll; \ 711 + call %[bpf_map_lookup_elem]; \ 712 + if r0 == 0 goto l0_%=; \ 713 + r1 = 47; \ 714 + r0 += r1; \ 715 + r1 = 48; \ 716 + r0 -= r1; \ 717 + r1 
= 1; \ 718 + r0 += r1; \ 719 + r0 = *(u8*)(r0 + 0); \ 720 + l0_%=: r0 = 1; \ 721 + exit; \ 722 + " : 723 + : __imm(bpf_map_lookup_elem), 724 + __imm_addr(map_array_48b) 725 + : __clobber_all); 726 + } 727 + 728 + SEC("socket") 729 + __description("map access: value_ptr -= known scalar, lower oob arith, test 3") 730 + __success __success_unpriv __retval(1) 731 + __naked void lower_oob_arith_test_3(void) 732 + { 733 + asm volatile (" \ 734 + r1 = 0; \ 735 + *(u64*)(r10 - 8) = r1; \ 736 + r2 = r10; \ 737 + r2 += -8; \ 738 + r1 = %[map_array_48b] ll; \ 739 + call %[bpf_map_lookup_elem]; \ 740 + if r0 == 0 goto l0_%=; \ 741 + r1 = 47; \ 742 + r0 += r1; \ 743 + r1 = 47; \ 744 + r0 -= r1; \ 745 + r0 = *(u8*)(r0 + 0); \ 746 + l0_%=: r0 = 1; \ 747 + exit; \ 748 + " : 749 + : __imm(bpf_map_lookup_elem), 750 + __imm_addr(map_array_48b) 751 + : __clobber_all); 752 + } 753 + 754 + SEC("socket") 755 + __description("map access: known scalar += value_ptr") 756 + __success __success_unpriv __retval(1) 757 + __naked void access_known_scalar_value_ptr_1(void) 758 + { 759 + asm volatile (" \ 760 + r1 = 0; \ 761 + *(u64*)(r10 - 8) = r1; \ 762 + r2 = r10; \ 763 + r2 += -8; \ 764 + r1 = %[map_array_48b] ll; \ 765 + call %[bpf_map_lookup_elem]; \ 766 + if r0 == 0 goto l0_%=; \ 767 + r1 = 4; \ 768 + r1 += r0; \ 769 + r0 = *(u8*)(r1 + 0); \ 770 + l0_%=: r0 = 1; \ 771 + exit; \ 772 + " : 773 + : __imm(bpf_map_lookup_elem), 774 + __imm_addr(map_array_48b) 775 + : __clobber_all); 776 + } 777 + 778 + SEC("socket") 779 + __description("map access: value_ptr += known scalar, 1") 780 + __success __success_unpriv __retval(1) 781 + __naked void value_ptr_known_scalar_1(void) 782 + { 783 + asm volatile (" \ 784 + r1 = 0; \ 785 + *(u64*)(r10 - 8) = r1; \ 786 + r2 = r10; \ 787 + r2 += -8; \ 788 + r1 = %[map_array_48b] ll; \ 789 + call %[bpf_map_lookup_elem]; \ 790 + if r0 == 0 goto l0_%=; \ 791 + r1 = 4; \ 792 + r0 += r1; \ 793 + r1 = *(u8*)(r0 + 0); \ 794 + l0_%=: r0 = 1; \ 795 + exit; \ 796 + " : 
797 + : __imm(bpf_map_lookup_elem), 798 + __imm_addr(map_array_48b) 799 + : __clobber_all); 800 + } 801 + 802 + SEC("socket") 803 + __description("map access: value_ptr += known scalar, 2") 804 + __failure __msg("invalid access to map value") 805 + __failure_unpriv 806 + __naked void value_ptr_known_scalar_2_1(void) 807 + { 808 + asm volatile (" \ 809 + r1 = 0; \ 810 + *(u64*)(r10 - 8) = r1; \ 811 + r2 = r10; \ 812 + r2 += -8; \ 813 + r1 = %[map_array_48b] ll; \ 814 + call %[bpf_map_lookup_elem]; \ 815 + if r0 == 0 goto l0_%=; \ 816 + r1 = 49; \ 817 + r0 += r1; \ 818 + r1 = *(u8*)(r0 + 0); \ 819 + l0_%=: r0 = 1; \ 820 + exit; \ 821 + " : 822 + : __imm(bpf_map_lookup_elem), 823 + __imm_addr(map_array_48b) 824 + : __clobber_all); 825 + } 826 + 827 + SEC("socket") 828 + __description("map access: value_ptr += known scalar, 3") 829 + __failure __msg("invalid access to map value") 830 + __failure_unpriv 831 + __naked void value_ptr_known_scalar_3(void) 832 + { 833 + asm volatile (" \ 834 + r1 = 0; \ 835 + *(u64*)(r10 - 8) = r1; \ 836 + r2 = r10; \ 837 + r2 += -8; \ 838 + r1 = %[map_array_48b] ll; \ 839 + call %[bpf_map_lookup_elem]; \ 840 + if r0 == 0 goto l0_%=; \ 841 + r1 = -1; \ 842 + r0 += r1; \ 843 + r1 = *(u8*)(r0 + 0); \ 844 + l0_%=: r0 = 1; \ 845 + exit; \ 846 + " : 847 + : __imm(bpf_map_lookup_elem), 848 + __imm_addr(map_array_48b) 849 + : __clobber_all); 850 + } 851 + 852 + SEC("socket") 853 + __description("map access: value_ptr += known scalar, 4") 854 + __success __success_unpriv __retval(1) 855 + __naked void value_ptr_known_scalar_4(void) 856 + { 857 + asm volatile (" \ 858 + r1 = 0; \ 859 + *(u64*)(r10 - 8) = r1; \ 860 + r2 = r10; \ 861 + r2 += -8; \ 862 + r1 = %[map_array_48b] ll; \ 863 + call %[bpf_map_lookup_elem]; \ 864 + if r0 == 0 goto l0_%=; \ 865 + r1 = 5; \ 866 + r0 += r1; \ 867 + r1 = -2; \ 868 + r0 += r1; \ 869 + r1 = -1; \ 870 + r0 += r1; \ 871 + r1 = *(u8*)(r0 + 0); \ 872 + l0_%=: r0 = 1; \ 873 + exit; \ 874 + " : 875 + : 
__imm(bpf_map_lookup_elem), 876 + __imm_addr(map_array_48b) 877 + : __clobber_all); 878 + } 879 + 880 + SEC("socket") 881 + __description("map access: value_ptr += known scalar, 5") 882 + __success __success_unpriv __retval(0xabcdef12) 883 + __naked void value_ptr_known_scalar_5(void) 884 + { 885 + asm volatile (" \ 886 + r1 = 0; \ 887 + *(u64*)(r10 - 8) = r1; \ 888 + r2 = r10; \ 889 + r2 += -8; \ 890 + r1 = %[map_array_48b] ll; \ 891 + call %[bpf_map_lookup_elem]; \ 892 + if r0 == 0 goto l0_%=; \ 893 + r1 = %[__imm_0]; \ 894 + r1 += r0; \ 895 + r0 = *(u32*)(r1 + 0); \ 896 + l0_%=: exit; \ 897 + " : 898 + : __imm(bpf_map_lookup_elem), 899 + __imm_addr(map_array_48b), 900 + __imm_const(__imm_0, (6 + 1) * sizeof(int)) 901 + : __clobber_all); 902 + } 903 + 904 + SEC("socket") 905 + __description("map access: value_ptr += known scalar, 6") 906 + __success __success_unpriv __retval(0xabcdef12) 907 + __naked void value_ptr_known_scalar_6(void) 908 + { 909 + asm volatile (" \ 910 + r1 = 0; \ 911 + *(u64*)(r10 - 8) = r1; \ 912 + r2 = r10; \ 913 + r2 += -8; \ 914 + r1 = %[map_array_48b] ll; \ 915 + call %[bpf_map_lookup_elem]; \ 916 + if r0 == 0 goto l0_%=; \ 917 + r1 = %[__imm_0]; \ 918 + r0 += r1; \ 919 + r1 = %[__imm_1]; \ 920 + r0 += r1; \ 921 + r0 = *(u32*)(r0 + 0); \ 922 + l0_%=: exit; \ 923 + " : 924 + : __imm(bpf_map_lookup_elem), 925 + __imm_addr(map_array_48b), 926 + __imm_const(__imm_0, (3 + 1) * sizeof(int)), 927 + __imm_const(__imm_1, 3 * sizeof(int)) 928 + : __clobber_all); 929 + } 930 + 931 + SEC("socket") 932 + __description("map access: value_ptr += N, value_ptr -= N known scalar") 933 + __success __success_unpriv __retval(0x12345678) 934 + __naked void value_ptr_n_known_scalar(void) 935 + { 936 + asm volatile (" \ 937 + r1 = 0; \ 938 + *(u64*)(r10 - 8) = r1; \ 939 + r2 = r10; \ 940 + r2 += -8; \ 941 + r1 = %[map_array_48b] ll; \ 942 + call %[bpf_map_lookup_elem]; \ 943 + if r0 == 0 goto l0_%=; \ 944 + w1 = 0x12345678; \ 945 + *(u32*)(r0 + 0) = r1; \ 946 + 
r0 += 2; \ 947 + r1 = 2; \ 948 + r0 -= r1; \ 949 + r0 = *(u32*)(r0 + 0); \ 950 + l0_%=: exit; \ 951 + " : 952 + : __imm(bpf_map_lookup_elem), 953 + __imm_addr(map_array_48b) 954 + : __clobber_all); 955 + } 956 + 957 + SEC("socket") 958 + __description("map access: unknown scalar += value_ptr, 1") 959 + __success __success_unpriv __retval(1) 960 + __naked void unknown_scalar_value_ptr_1(void) 961 + { 962 + asm volatile (" \ 963 + r1 = 0; \ 964 + *(u64*)(r10 - 8) = r1; \ 965 + r2 = r10; \ 966 + r2 += -8; \ 967 + r1 = %[map_array_48b] ll; \ 968 + call %[bpf_map_lookup_elem]; \ 969 + if r0 == 0 goto l0_%=; \ 970 + r1 = *(u8*)(r0 + 0); \ 971 + r1 &= 0xf; \ 972 + r1 += r0; \ 973 + r0 = *(u8*)(r1 + 0); \ 974 + l0_%=: r0 = 1; \ 975 + exit; \ 976 + " : 977 + : __imm(bpf_map_lookup_elem), 978 + __imm_addr(map_array_48b) 979 + : __clobber_all); 980 + } 981 + 982 + SEC("socket") 983 + __description("map access: unknown scalar += value_ptr, 2") 984 + __success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) 985 + __naked void unknown_scalar_value_ptr_2(void) 986 + { 987 + asm volatile (" \ 988 + r1 = 0; \ 989 + *(u64*)(r10 - 8) = r1; \ 990 + r2 = r10; \ 991 + r2 += -8; \ 992 + r1 = %[map_array_48b] ll; \ 993 + call %[bpf_map_lookup_elem]; \ 994 + if r0 == 0 goto l0_%=; \ 995 + r1 = *(u32*)(r0 + 0); \ 996 + r1 &= 31; \ 997 + r1 += r0; \ 998 + r0 = *(u32*)(r1 + 0); \ 999 + l0_%=: exit; \ 1000 + " : 1001 + : __imm(bpf_map_lookup_elem), 1002 + __imm_addr(map_array_48b) 1003 + : __clobber_all); 1004 + } 1005 + 1006 + SEC("socket") 1007 + __description("map access: unknown scalar += value_ptr, 3") 1008 + __success __failure_unpriv 1009 + __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 1010 + __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) 1011 + __naked void unknown_scalar_value_ptr_3(void) 1012 + { 1013 + asm volatile (" \ 1014 + r1 = 0; \ 1015 + *(u64*)(r10 - 8) = r1; \ 1016 + r2 = r10; \ 1017 + r2 += -8; \ 1018 + r1 = %[map_array_48b] ll; \ 
1019 + call %[bpf_map_lookup_elem]; \ 1020 + if r0 == 0 goto l0_%=; \ 1021 + r1 = -1; \ 1022 + r0 += r1; \ 1023 + r1 = 1; \ 1024 + r0 += r1; \ 1025 + r1 = *(u32*)(r0 + 0); \ 1026 + r1 &= 31; \ 1027 + r1 += r0; \ 1028 + r0 = *(u32*)(r1 + 0); \ 1029 + l0_%=: exit; \ 1030 + " : 1031 + : __imm(bpf_map_lookup_elem), 1032 + __imm_addr(map_array_48b) 1033 + : __clobber_all); 1034 + } 1035 + 1036 + SEC("socket") 1037 + __description("map access: unknown scalar += value_ptr, 4") 1038 + __failure __msg("R1 max value is outside of the allowed memory range") 1039 + __msg_unpriv("R1 pointer arithmetic of map value goes out of range") 1040 + __flag(BPF_F_ANY_ALIGNMENT) 1041 + __naked void unknown_scalar_value_ptr_4(void) 1042 + { 1043 + asm volatile (" \ 1044 + r1 = 0; \ 1045 + *(u64*)(r10 - 8) = r1; \ 1046 + r2 = r10; \ 1047 + r2 += -8; \ 1048 + r1 = %[map_array_48b] ll; \ 1049 + call %[bpf_map_lookup_elem]; \ 1050 + if r0 == 0 goto l0_%=; \ 1051 + r1 = 19; \ 1052 + r0 += r1; \ 1053 + r1 = *(u32*)(r0 + 0); \ 1054 + r1 &= 31; \ 1055 + r1 += r0; \ 1056 + r0 = *(u32*)(r1 + 0); \ 1057 + l0_%=: exit; \ 1058 + " : 1059 + : __imm(bpf_map_lookup_elem), 1060 + __imm_addr(map_array_48b) 1061 + : __clobber_all); 1062 + } 1063 + 1064 + SEC("socket") 1065 + __description("map access: value_ptr += unknown scalar, 1") 1066 + __success __success_unpriv __retval(1) 1067 + __naked void value_ptr_unknown_scalar_1(void) 1068 + { 1069 + asm volatile (" \ 1070 + r1 = 0; \ 1071 + *(u64*)(r10 - 8) = r1; \ 1072 + r2 = r10; \ 1073 + r2 += -8; \ 1074 + r1 = %[map_array_48b] ll; \ 1075 + call %[bpf_map_lookup_elem]; \ 1076 + if r0 == 0 goto l0_%=; \ 1077 + r1 = *(u8*)(r0 + 0); \ 1078 + r1 &= 0xf; \ 1079 + r0 += r1; \ 1080 + r1 = *(u8*)(r0 + 0); \ 1081 + l0_%=: r0 = 1; \ 1082 + exit; \ 1083 + " : 1084 + : __imm(bpf_map_lookup_elem), 1085 + __imm_addr(map_array_48b) 1086 + : __clobber_all); 1087 + } 1088 + 1089 + SEC("socket") 1090 + __description("map access: value_ptr += unknown scalar, 2") 1091 + 
__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) 1092 + __naked void value_ptr_unknown_scalar_2_1(void) 1093 + { 1094 + asm volatile (" \ 1095 + r1 = 0; \ 1096 + *(u64*)(r10 - 8) = r1; \ 1097 + r2 = r10; \ 1098 + r2 += -8; \ 1099 + r1 = %[map_array_48b] ll; \ 1100 + call %[bpf_map_lookup_elem]; \ 1101 + if r0 == 0 goto l0_%=; \ 1102 + r1 = *(u32*)(r0 + 0); \ 1103 + r1 &= 31; \ 1104 + r0 += r1; \ 1105 + r0 = *(u32*)(r0 + 0); \ 1106 + l0_%=: exit; \ 1107 + " : 1108 + : __imm(bpf_map_lookup_elem), 1109 + __imm_addr(map_array_48b) 1110 + : __clobber_all); 1111 + } 1112 + 1113 + SEC("socket") 1114 + __description("map access: value_ptr += unknown scalar, 3") 1115 + __success __success_unpriv __retval(1) 1116 + __naked void value_ptr_unknown_scalar_3(void) 1117 + { 1118 + asm volatile (" \ 1119 + r1 = 0; \ 1120 + *(u64*)(r10 - 8) = r1; \ 1121 + r2 = r10; \ 1122 + r2 += -8; \ 1123 + r1 = %[map_array_48b] ll; \ 1124 + call %[bpf_map_lookup_elem]; \ 1125 + if r0 == 0 goto l0_%=; \ 1126 + r1 = *(u64*)(r0 + 0); \ 1127 + r2 = *(u64*)(r0 + 8); \ 1128 + r3 = *(u64*)(r0 + 16); \ 1129 + r1 &= 0xf; \ 1130 + r3 &= 1; \ 1131 + r3 |= 1; \ 1132 + if r2 > r3 goto l0_%=; \ 1133 + r0 += r3; \ 1134 + r0 = *(u8*)(r0 + 0); \ 1135 + r0 = 1; \ 1136 + l1_%=: exit; \ 1137 + l0_%=: r0 = 2; \ 1138 + goto l1_%=; \ 1139 + " : 1140 + : __imm(bpf_map_lookup_elem), 1141 + __imm_addr(map_array_48b) 1142 + : __clobber_all); 1143 + } 1144 + 1145 + SEC("socket") 1146 + __description("map access: value_ptr += value_ptr") 1147 + __failure __msg("R0 pointer += pointer prohibited") 1148 + __failure_unpriv 1149 + __naked void access_value_ptr_value_ptr_1(void) 1150 + { 1151 + asm volatile (" \ 1152 + r1 = 0; \ 1153 + *(u64*)(r10 - 8) = r1; \ 1154 + r2 = r10; \ 1155 + r2 += -8; \ 1156 + r1 = %[map_array_48b] ll; \ 1157 + call %[bpf_map_lookup_elem]; \ 1158 + if r0 == 0 goto l0_%=; \ 1159 + r0 += r0; \ 1160 + r1 = *(u8*)(r0 + 0); \ 1161 + l0_%=: r0 = 1; \ 1162 + exit; \ 1163 + " : 1164 
+ : __imm(bpf_map_lookup_elem), 1165 + __imm_addr(map_array_48b) 1166 + : __clobber_all); 1167 + } 1168 + 1169 + SEC("socket") 1170 + __description("map access: known scalar -= value_ptr") 1171 + __failure __msg("R1 tried to subtract pointer from scalar") 1172 + __failure_unpriv 1173 + __naked void access_known_scalar_value_ptr_2(void) 1174 + { 1175 + asm volatile (" \ 1176 + r1 = 0; \ 1177 + *(u64*)(r10 - 8) = r1; \ 1178 + r2 = r10; \ 1179 + r2 += -8; \ 1180 + r1 = %[map_array_48b] ll; \ 1181 + call %[bpf_map_lookup_elem]; \ 1182 + if r0 == 0 goto l0_%=; \ 1183 + r1 = 4; \ 1184 + r1 -= r0; \ 1185 + r0 = *(u8*)(r1 + 0); \ 1186 + l0_%=: r0 = 1; \ 1187 + exit; \ 1188 + " : 1189 + : __imm(bpf_map_lookup_elem), 1190 + __imm_addr(map_array_48b) 1191 + : __clobber_all); 1192 + } 1193 + 1194 + SEC("socket") 1195 + __description("map access: value_ptr -= known scalar") 1196 + __failure __msg("R0 min value is outside of the allowed memory range") 1197 + __failure_unpriv 1198 + __naked void access_value_ptr_known_scalar(void) 1199 + { 1200 + asm volatile (" \ 1201 + r1 = 0; \ 1202 + *(u64*)(r10 - 8) = r1; \ 1203 + r2 = r10; \ 1204 + r2 += -8; \ 1205 + r1 = %[map_array_48b] ll; \ 1206 + call %[bpf_map_lookup_elem]; \ 1207 + if r0 == 0 goto l0_%=; \ 1208 + r1 = 4; \ 1209 + r0 -= r1; \ 1210 + r1 = *(u8*)(r0 + 0); \ 1211 + l0_%=: r0 = 1; \ 1212 + exit; \ 1213 + " : 1214 + : __imm(bpf_map_lookup_elem), 1215 + __imm_addr(map_array_48b) 1216 + : __clobber_all); 1217 + } 1218 + 1219 + SEC("socket") 1220 + __description("map access: value_ptr -= known scalar, 2") 1221 + __success __success_unpriv __retval(1) 1222 + __naked void value_ptr_known_scalar_2_2(void) 1223 + { 1224 + asm volatile (" \ 1225 + r1 = 0; \ 1226 + *(u64*)(r10 - 8) = r1; \ 1227 + r2 = r10; \ 1228 + r2 += -8; \ 1229 + r1 = %[map_array_48b] ll; \ 1230 + call %[bpf_map_lookup_elem]; \ 1231 + if r0 == 0 goto l0_%=; \ 1232 + r1 = 6; \ 1233 + r2 = 4; \ 1234 + r0 += r1; \ 1235 + r0 -= r2; \ 1236 + r1 = *(u8*)(r0 + 0); \ 
1237 + l0_%=: r0 = 1; \ 1238 + exit; \ 1239 + " : 1240 + : __imm(bpf_map_lookup_elem), 1241 + __imm_addr(map_array_48b) 1242 + : __clobber_all); 1243 + } 1244 + 1245 + SEC("socket") 1246 + __description("map access: unknown scalar -= value_ptr") 1247 + __failure __msg("R1 tried to subtract pointer from scalar") 1248 + __failure_unpriv 1249 + __naked void access_unknown_scalar_value_ptr(void) 1250 + { 1251 + asm volatile (" \ 1252 + r1 = 0; \ 1253 + *(u64*)(r10 - 8) = r1; \ 1254 + r2 = r10; \ 1255 + r2 += -8; \ 1256 + r1 = %[map_array_48b] ll; \ 1257 + call %[bpf_map_lookup_elem]; \ 1258 + if r0 == 0 goto l0_%=; \ 1259 + r1 = *(u8*)(r0 + 0); \ 1260 + r1 &= 0xf; \ 1261 + r1 -= r0; \ 1262 + r0 = *(u8*)(r1 + 0); \ 1263 + l0_%=: r0 = 1; \ 1264 + exit; \ 1265 + " : 1266 + : __imm(bpf_map_lookup_elem), 1267 + __imm_addr(map_array_48b) 1268 + : __clobber_all); 1269 + } 1270 + 1271 + SEC("socket") 1272 + __description("map access: value_ptr -= unknown scalar") 1273 + __failure __msg("R0 min value is negative") 1274 + __failure_unpriv 1275 + __naked void access_value_ptr_unknown_scalar(void) 1276 + { 1277 + asm volatile (" \ 1278 + r1 = 0; \ 1279 + *(u64*)(r10 - 8) = r1; \ 1280 + r2 = r10; \ 1281 + r2 += -8; \ 1282 + r1 = %[map_array_48b] ll; \ 1283 + call %[bpf_map_lookup_elem]; \ 1284 + if r0 == 0 goto l0_%=; \ 1285 + r1 = *(u8*)(r0 + 0); \ 1286 + r1 &= 0xf; \ 1287 + r0 -= r1; \ 1288 + r1 = *(u8*)(r0 + 0); \ 1289 + l0_%=: r0 = 1; \ 1290 + exit; \ 1291 + " : 1292 + : __imm(bpf_map_lookup_elem), 1293 + __imm_addr(map_array_48b) 1294 + : __clobber_all); 1295 + } 1296 + 1297 + SEC("socket") 1298 + __description("map access: value_ptr -= unknown scalar, 2") 1299 + __success __failure_unpriv 1300 + __msg_unpriv("R0 pointer arithmetic of map value goes out of range") 1301 + __retval(1) 1302 + __naked void value_ptr_unknown_scalar_2_2(void) 1303 + { 1304 + asm volatile (" \ 1305 + r1 = 0; \ 1306 + *(u64*)(r10 - 8) = r1; \ 1307 + r2 = r10; \ 1308 + r2 += -8; \ 1309 + r1 = 
%[map_array_48b] ll; \ 1310 + call %[bpf_map_lookup_elem]; \ 1311 + if r0 == 0 goto l0_%=; \ 1312 + r1 = *(u8*)(r0 + 0); \ 1313 + r1 &= 0xf; \ 1314 + r1 |= 0x7; \ 1315 + r0 += r1; \ 1316 + r1 = *(u8*)(r0 + 0); \ 1317 + r1 &= 0x7; \ 1318 + r0 -= r1; \ 1319 + r1 = *(u8*)(r0 + 0); \ 1320 + l0_%=: r0 = 1; \ 1321 + exit; \ 1322 + " : 1323 + : __imm(bpf_map_lookup_elem), 1324 + __imm_addr(map_array_48b) 1325 + : __clobber_all); 1326 + } 1327 + 1328 + SEC("socket") 1329 + __description("map access: value_ptr -= value_ptr") 1330 + __failure __msg("R0 invalid mem access 'scalar'") 1331 + __msg_unpriv("R0 pointer -= pointer prohibited") 1332 + __naked void access_value_ptr_value_ptr_2(void) 1333 + { 1334 + asm volatile (" \ 1335 + r1 = 0; \ 1336 + *(u64*)(r10 - 8) = r1; \ 1337 + r2 = r10; \ 1338 + r2 += -8; \ 1339 + r1 = %[map_array_48b] ll; \ 1340 + call %[bpf_map_lookup_elem]; \ 1341 + if r0 == 0 goto l0_%=; \ 1342 + r0 -= r0; \ 1343 + r1 = *(u8*)(r0 + 0); \ 1344 + l0_%=: r0 = 1; \ 1345 + exit; \ 1346 + " : 1347 + : __imm(bpf_map_lookup_elem), 1348 + __imm_addr(map_array_48b) 1349 + : __clobber_all); 1350 + } 1351 + 1352 + SEC("socket") 1353 + __description("map access: trying to leak tainted dst reg") 1354 + __failure __msg("math between map_value pointer and 4294967295 is not allowed") 1355 + __failure_unpriv 1356 + __naked void to_leak_tainted_dst_reg(void) 1357 + { 1358 + asm volatile (" \ 1359 + r0 = 0; \ 1360 + r1 = 0; \ 1361 + *(u64*)(r10 - 8) = r1; \ 1362 + r2 = r10; \ 1363 + r2 += -8; \ 1364 + r1 = %[map_array_48b] ll; \ 1365 + call %[bpf_map_lookup_elem]; \ 1366 + if r0 != 0 goto l0_%=; \ 1367 + exit; \ 1368 + l0_%=: r2 = r0; \ 1369 + w1 = 0xFFFFFFFF; \ 1370 + w1 = w1; \ 1371 + r2 -= r1; \ 1372 + *(u64*)(r0 + 0) = r2; \ 1373 + r0 = 0; \ 1374 + exit; \ 1375 + " : 1376 + : __imm(bpf_map_lookup_elem), 1377 + __imm_addr(map_array_48b) 1378 + : __clobber_all); 1379 + } 1380 + 1381 + SEC("tc") 1382 + __description("32bit pkt_ptr -= scalar") 1383 + __success __retval(0) 
__flag(BPF_F_ANY_ALIGNMENT) 1384 + __naked void _32bit_pkt_ptr_scalar(void) 1385 + { 1386 + asm volatile (" \ 1387 + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 1388 + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ 1389 + r6 = r7; \ 1390 + r6 += 40; \ 1391 + if r6 > r8 goto l0_%=; \ 1392 + w4 = w7; \ 1393 + w6 -= w4; \ 1394 + l0_%=: r0 = 0; \ 1395 + exit; \ 1396 + " : 1397 + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 1398 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 1399 + : __clobber_all); 1400 + } 1401 + 1402 + SEC("tc") 1403 + __description("32bit scalar -= pkt_ptr") 1404 + __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) 1405 + __naked void _32bit_scalar_pkt_ptr(void) 1406 + { 1407 + asm volatile (" \ 1408 + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 1409 + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ 1410 + r6 = r7; \ 1411 + r6 += 40; \ 1412 + if r6 > r8 goto l0_%=; \ 1413 + w4 = w6; \ 1414 + w4 -= w7; \ 1415 + l0_%=: r0 = 0; \ 1416 + exit; \ 1417 + " : 1418 + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 1419 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 1420 + : __clobber_all); 1421 + } 1422 + 1423 + char _license[] SEC("license") = "GPL";
-1140
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
··· 1 - { 2 - "map access: known scalar += value_ptr unknown vs const", 3 - .insns = { 4 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 5 - offsetof(struct __sk_buff, len)), 6 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 7 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 8 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 9 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 10 - BPF_LD_MAP_FD(BPF_REG_1, 0), 11 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 12 - BPF_LD_MAP_FD(BPF_REG_1, 0), 13 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 14 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 15 - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), 16 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), 17 - BPF_MOV64_IMM(BPF_REG_1, 6), 18 - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 19 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), 20 - BPF_JMP_IMM(BPF_JA, 0, 0, 1), 21 - BPF_MOV64_IMM(BPF_REG_1, 3), 22 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 23 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 24 - BPF_MOV64_IMM(BPF_REG_0, 1), 25 - BPF_EXIT_INSN(), 26 - }, 27 - .fixup_map_hash_16b = { 5 }, 28 - .fixup_map_array_48b = { 8 }, 29 - .result_unpriv = REJECT, 30 - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", 31 - .result = ACCEPT, 32 - .retval = 1, 33 - }, 34 - { 35 - "map access: known scalar += value_ptr const vs unknown", 36 - .insns = { 37 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 38 - offsetof(struct __sk_buff, len)), 39 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 40 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 41 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 42 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 43 - BPF_LD_MAP_FD(BPF_REG_1, 0), 44 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 45 - BPF_LD_MAP_FD(BPF_REG_1, 0), 46 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 47 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 48 - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), 49 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), 50 - BPF_MOV64_IMM(BPF_REG_1, 3), 51 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 52 - 
BPF_MOV64_IMM(BPF_REG_1, 6), 53 - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 54 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), 55 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 56 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 57 - BPF_MOV64_IMM(BPF_REG_0, 1), 58 - BPF_EXIT_INSN(), 59 - }, 60 - .fixup_map_hash_16b = { 5 }, 61 - .fixup_map_array_48b = { 8 }, 62 - .result_unpriv = REJECT, 63 - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", 64 - .result = ACCEPT, 65 - .retval = 1, 66 - }, 67 - { 68 - "map access: known scalar += value_ptr const vs const (ne)", 69 - .insns = { 70 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 71 - offsetof(struct __sk_buff, len)), 72 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 73 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 74 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 75 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 76 - BPF_LD_MAP_FD(BPF_REG_1, 0), 77 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 78 - BPF_LD_MAP_FD(BPF_REG_1, 0), 79 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 80 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 81 - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), 82 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), 83 - BPF_MOV64_IMM(BPF_REG_1, 3), 84 - BPF_JMP_IMM(BPF_JA, 0, 0, 1), 85 - BPF_MOV64_IMM(BPF_REG_1, 5), 86 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 87 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 88 - BPF_MOV64_IMM(BPF_REG_0, 1), 89 - BPF_EXIT_INSN(), 90 - }, 91 - .fixup_map_hash_16b = { 5 }, 92 - .fixup_map_array_48b = { 8 }, 93 - .result_unpriv = REJECT, 94 - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", 95 - .result = ACCEPT, 96 - .retval = 1, 97 - }, 98 - { 99 - "map access: known scalar += value_ptr const vs const (eq)", 100 - .insns = { 101 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 102 - offsetof(struct __sk_buff, len)), 103 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 104 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 105 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 106 - 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 107 - BPF_LD_MAP_FD(BPF_REG_1, 0), 108 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 109 - BPF_LD_MAP_FD(BPF_REG_1, 0), 110 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 111 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 112 - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), 113 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), 114 - BPF_MOV64_IMM(BPF_REG_1, 5), 115 - BPF_JMP_IMM(BPF_JA, 0, 0, 1), 116 - BPF_MOV64_IMM(BPF_REG_1, 5), 117 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 118 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 119 - BPF_MOV64_IMM(BPF_REG_0, 1), 120 - BPF_EXIT_INSN(), 121 - }, 122 - .fixup_map_hash_16b = { 5 }, 123 - .fixup_map_array_48b = { 8 }, 124 - .result = ACCEPT, 125 - .retval = 1, 126 - }, 127 - { 128 - "map access: known scalar += value_ptr unknown vs unknown (eq)", 129 - .insns = { 130 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 131 - offsetof(struct __sk_buff, len)), 132 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 133 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 134 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 135 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 136 - BPF_LD_MAP_FD(BPF_REG_1, 0), 137 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 138 - BPF_LD_MAP_FD(BPF_REG_1, 0), 139 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 140 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 141 - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), 142 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), 143 - BPF_MOV64_IMM(BPF_REG_1, 6), 144 - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 145 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), 146 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 147 - BPF_MOV64_IMM(BPF_REG_1, 6), 148 - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 149 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), 150 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 151 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 152 - BPF_MOV64_IMM(BPF_REG_0, 1), 153 - BPF_EXIT_INSN(), 154 - }, 155 - .fixup_map_hash_16b = { 5 }, 156 - .fixup_map_array_48b = { 8 }, 157 - 
.result = ACCEPT, 158 - .retval = 1, 159 - }, 160 - { 161 - "map access: known scalar += value_ptr unknown vs unknown (lt)", 162 - .insns = { 163 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 164 - offsetof(struct __sk_buff, len)), 165 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 166 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 167 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 168 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 169 - BPF_LD_MAP_FD(BPF_REG_1, 0), 170 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 171 - BPF_LD_MAP_FD(BPF_REG_1, 0), 172 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 173 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 174 - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), 175 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), 176 - BPF_MOV64_IMM(BPF_REG_1, 6), 177 - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 178 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3), 179 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 180 - BPF_MOV64_IMM(BPF_REG_1, 6), 181 - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 182 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), 183 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 184 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 185 - BPF_MOV64_IMM(BPF_REG_0, 1), 186 - BPF_EXIT_INSN(), 187 - }, 188 - .fixup_map_hash_16b = { 5 }, 189 - .fixup_map_array_48b = { 8 }, 190 - .result_unpriv = REJECT, 191 - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", 192 - .result = ACCEPT, 193 - .retval = 1, 194 - }, 195 - { 196 - "map access: known scalar += value_ptr unknown vs unknown (gt)", 197 - .insns = { 198 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 199 - offsetof(struct __sk_buff, len)), 200 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 201 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 202 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 203 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 204 - BPF_LD_MAP_FD(BPF_REG_1, 0), 205 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 206 - BPF_LD_MAP_FD(BPF_REG_1, 0), 207 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 208 - 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 209 - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), 210 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), 211 - BPF_MOV64_IMM(BPF_REG_1, 6), 212 - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 213 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), 214 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 215 - BPF_MOV64_IMM(BPF_REG_1, 6), 216 - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 217 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3), 218 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 219 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 220 - BPF_MOV64_IMM(BPF_REG_0, 1), 221 - BPF_EXIT_INSN(), 222 - }, 223 - .fixup_map_hash_16b = { 5 }, 224 - .fixup_map_array_48b = { 8 }, 225 - .result_unpriv = REJECT, 226 - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", 227 - .result = ACCEPT, 228 - .retval = 1, 229 - }, 230 - { 231 - "map access: known scalar += value_ptr from different maps", 232 - .insns = { 233 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 234 - offsetof(struct __sk_buff, len)), 235 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 236 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 237 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 238 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 239 - BPF_LD_MAP_FD(BPF_REG_1, 0), 240 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 241 - BPF_LD_MAP_FD(BPF_REG_1, 0), 242 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 243 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 244 - BPF_MOV64_IMM(BPF_REG_1, 4), 245 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 246 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 247 - BPF_MOV64_IMM(BPF_REG_0, 1), 248 - BPF_EXIT_INSN(), 249 - }, 250 - .fixup_map_hash_16b = { 5 }, 251 - .fixup_map_array_48b = { 8 }, 252 - .result = ACCEPT, 253 - .retval = 1, 254 - }, 255 - { 256 - "map access: value_ptr -= known scalar from different maps", 257 - .insns = { 258 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 259 - offsetof(struct __sk_buff, len)), 260 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 261 - 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 262 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 263 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 264 - BPF_LD_MAP_FD(BPF_REG_1, 0), 265 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 266 - BPF_LD_MAP_FD(BPF_REG_1, 0), 267 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 268 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 269 - BPF_MOV64_IMM(BPF_REG_1, 4), 270 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 271 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 272 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 273 - BPF_MOV64_IMM(BPF_REG_0, 1), 274 - BPF_EXIT_INSN(), 275 - }, 276 - .fixup_map_hash_16b = { 5 }, 277 - .fixup_map_array_48b = { 8 }, 278 - .result = ACCEPT, 279 - .result_unpriv = REJECT, 280 - .errstr_unpriv = "R0 min value is outside of the allowed memory range", 281 - .retval = 1, 282 - }, 283 - { 284 - "map access: known scalar += value_ptr from different maps, but same value properties", 285 - .insns = { 286 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 287 - offsetof(struct __sk_buff, len)), 288 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 289 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 290 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 291 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), 292 - BPF_LD_MAP_FD(BPF_REG_1, 0), 293 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 294 - BPF_LD_MAP_FD(BPF_REG_1, 0), 295 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 296 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 297 - BPF_MOV64_IMM(BPF_REG_1, 4), 298 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 299 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 300 - BPF_MOV64_IMM(BPF_REG_0, 1), 301 - BPF_EXIT_INSN(), 302 - }, 303 - .fixup_map_hash_48b = { 5 }, 304 - .fixup_map_array_48b = { 8 }, 305 - .result = ACCEPT, 306 - .retval = 1, 307 - }, 308 - { 309 - "map access: mixing value pointer and scalar, 1", 310 - .insns = { 311 - // load map value pointer into r0 and r2 312 - BPF_MOV64_IMM(BPF_REG_0, 1), 313 - BPF_LD_MAP_FD(BPF_REG_ARG1, 
0), 314 - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), 315 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), 316 - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), 317 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 318 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 319 - BPF_EXIT_INSN(), 320 - // load some number from the map into r1 321 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 322 - // depending on r1, branch: 323 - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3), 324 - // branch A 325 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 326 - BPF_MOV64_IMM(BPF_REG_3, 0), 327 - BPF_JMP_A(2), 328 - // branch B 329 - BPF_MOV64_IMM(BPF_REG_2, 0), 330 - BPF_MOV64_IMM(BPF_REG_3, 0x100000), 331 - // common instruction 332 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), 333 - // depending on r1, branch: 334 - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 335 - // branch A 336 - BPF_JMP_A(4), 337 - // branch B 338 - BPF_MOV64_IMM(BPF_REG_0, 0x13371337), 339 - // verifier follows fall-through 340 - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), 341 - BPF_MOV64_IMM(BPF_REG_0, 0), 342 - BPF_EXIT_INSN(), 343 - // fake-dead code; targeted from branch A to 344 - // prevent dead code sanitization 345 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 346 - BPF_MOV64_IMM(BPF_REG_0, 0), 347 - BPF_EXIT_INSN(), 348 - }, 349 - .fixup_map_array_48b = { 1 }, 350 - .result = ACCEPT, 351 - .result_unpriv = REJECT, 352 - .errstr_unpriv = "R2 pointer comparison prohibited", 353 - .retval = 0, 354 - }, 355 - { 356 - "map access: mixing value pointer and scalar, 2", 357 - .insns = { 358 - // load map value pointer into r0 and r2 359 - BPF_MOV64_IMM(BPF_REG_0, 1), 360 - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), 361 - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), 362 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), 363 - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), 364 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 365 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 366 - BPF_EXIT_INSN(), 367 - // load some number from the map into r1 368 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 
369 - // depending on r1, branch: 370 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 371 - // branch A 372 - BPF_MOV64_IMM(BPF_REG_2, 0), 373 - BPF_MOV64_IMM(BPF_REG_3, 0x100000), 374 - BPF_JMP_A(2), 375 - // branch B 376 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 377 - BPF_MOV64_IMM(BPF_REG_3, 0), 378 - // common instruction 379 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), 380 - // depending on r1, branch: 381 - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 382 - // branch A 383 - BPF_JMP_A(4), 384 - // branch B 385 - BPF_MOV64_IMM(BPF_REG_0, 0x13371337), 386 - // verifier follows fall-through 387 - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), 388 - BPF_MOV64_IMM(BPF_REG_0, 0), 389 - BPF_EXIT_INSN(), 390 - // fake-dead code; targeted from branch A to 391 - // prevent dead code sanitization, rejected 392 - // via branch B however 393 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 394 - BPF_MOV64_IMM(BPF_REG_0, 0), 395 - BPF_EXIT_INSN(), 396 - }, 397 - .fixup_map_array_48b = { 1 }, 398 - .result = ACCEPT, 399 - .result_unpriv = REJECT, 400 - .errstr_unpriv = "R0 invalid mem access 'scalar'", 401 - .retval = 0, 402 - }, 403 - { 404 - "sanitation: alu with different scalars 1", 405 - .insns = { 406 - BPF_MOV64_IMM(BPF_REG_0, 1), 407 - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), 408 - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), 409 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), 410 - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), 411 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 412 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 413 - BPF_EXIT_INSN(), 414 - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 415 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 416 - BPF_MOV64_IMM(BPF_REG_2, 0), 417 - BPF_MOV64_IMM(BPF_REG_3, 0x100000), 418 - BPF_JMP_A(2), 419 - BPF_MOV64_IMM(BPF_REG_2, 42), 420 - BPF_MOV64_IMM(BPF_REG_3, 0x100001), 421 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), 422 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 423 - BPF_EXIT_INSN(), 424 - }, 425 - .fixup_map_array_48b = { 1 }, 426 - .result = ACCEPT, 427 - .retval 
= 0x100000, 428 - }, 429 - { 430 - "sanitation: alu with different scalars 2", 431 - .insns = { 432 - BPF_MOV64_IMM(BPF_REG_0, 1), 433 - BPF_LD_MAP_FD(BPF_REG_1, 0), 434 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 435 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), 436 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 437 - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), 438 - BPF_EMIT_CALL(BPF_FUNC_map_delete_elem), 439 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 440 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 441 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), 442 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 443 - BPF_EMIT_CALL(BPF_FUNC_map_delete_elem), 444 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 445 - BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), 446 - BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7), 447 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), 448 - BPF_EXIT_INSN(), 449 - }, 450 - .fixup_map_array_48b = { 1 }, 451 - .result = ACCEPT, 452 - .retval = -EINVAL * 2, 453 - }, 454 - { 455 - "sanitation: alu with different scalars 3", 456 - .insns = { 457 - BPF_MOV64_IMM(BPF_REG_0, EINVAL), 458 - BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1), 459 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 460 - BPF_MOV64_IMM(BPF_REG_0, EINVAL), 461 - BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1), 462 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 463 - BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), 464 - BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7), 465 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), 466 - BPF_EXIT_INSN(), 467 - }, 468 - .result = ACCEPT, 469 - .retval = -EINVAL * 2, 470 - }, 471 - { 472 - "map access: value_ptr += known scalar, upper oob arith, test 1", 473 - .insns = { 474 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 475 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 476 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 477 - BPF_LD_MAP_FD(BPF_REG_1, 0), 478 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 479 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 480 - BPF_MOV64_IMM(BPF_REG_1, 48), 481 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 482 - BPF_ALU64_REG(BPF_SUB, 
BPF_REG_0, BPF_REG_1), 483 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 484 - BPF_MOV64_IMM(BPF_REG_0, 1), 485 - BPF_EXIT_INSN(), 486 - }, 487 - .fixup_map_array_48b = { 3 }, 488 - .result = ACCEPT, 489 - .result_unpriv = REJECT, 490 - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", 491 - .retval = 1, 492 - }, 493 - { 494 - "map access: value_ptr += known scalar, upper oob arith, test 2", 495 - .insns = { 496 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 497 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 498 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 499 - BPF_LD_MAP_FD(BPF_REG_1, 0), 500 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 501 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 502 - BPF_MOV64_IMM(BPF_REG_1, 49), 503 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 504 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 505 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 506 - BPF_MOV64_IMM(BPF_REG_0, 1), 507 - BPF_EXIT_INSN(), 508 - }, 509 - .fixup_map_array_48b = { 3 }, 510 - .result = ACCEPT, 511 - .result_unpriv = REJECT, 512 - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", 513 - .retval = 1, 514 - }, 515 - { 516 - "map access: value_ptr += known scalar, upper oob arith, test 3", 517 - .insns = { 518 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 519 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 520 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 521 - BPF_LD_MAP_FD(BPF_REG_1, 0), 522 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 523 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 524 - BPF_MOV64_IMM(BPF_REG_1, 47), 525 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 526 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 527 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 528 - BPF_MOV64_IMM(BPF_REG_0, 1), 529 - BPF_EXIT_INSN(), 530 - }, 531 - .fixup_map_array_48b = { 3 }, 532 - .result = ACCEPT, 533 - .retval = 1, 534 - }, 535 - { 536 - "map access: value_ptr -= known scalar, lower oob arith, test 1", 537 - 
.insns = { 538 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 539 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 540 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 541 - BPF_LD_MAP_FD(BPF_REG_1, 0), 542 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 543 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 544 - BPF_MOV64_IMM(BPF_REG_1, 47), 545 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 546 - BPF_MOV64_IMM(BPF_REG_1, 48), 547 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 548 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 549 - BPF_MOV64_IMM(BPF_REG_0, 1), 550 - BPF_EXIT_INSN(), 551 - }, 552 - .fixup_map_array_48b = { 3 }, 553 - .result = REJECT, 554 - .errstr = "R0 min value is outside of the allowed memory range", 555 - .result_unpriv = REJECT, 556 - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", 557 - }, 558 - { 559 - "map access: value_ptr -= known scalar, lower oob arith, test 2", 560 - .insns = { 561 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 562 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 563 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 564 - BPF_LD_MAP_FD(BPF_REG_1, 0), 565 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 566 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 567 - BPF_MOV64_IMM(BPF_REG_1, 47), 568 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 569 - BPF_MOV64_IMM(BPF_REG_1, 48), 570 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 571 - BPF_MOV64_IMM(BPF_REG_1, 1), 572 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 573 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 574 - BPF_MOV64_IMM(BPF_REG_0, 1), 575 - BPF_EXIT_INSN(), 576 - }, 577 - .fixup_map_array_48b = { 3 }, 578 - .result = ACCEPT, 579 - .result_unpriv = REJECT, 580 - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", 581 - .retval = 1, 582 - }, 583 - { 584 - "map access: value_ptr -= known scalar, lower oob arith, test 3", 585 - .insns = { 586 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 587 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 588 
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 589 - BPF_LD_MAP_FD(BPF_REG_1, 0), 590 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 591 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 592 - BPF_MOV64_IMM(BPF_REG_1, 47), 593 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 594 - BPF_MOV64_IMM(BPF_REG_1, 47), 595 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 596 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 597 - BPF_MOV64_IMM(BPF_REG_0, 1), 598 - BPF_EXIT_INSN(), 599 - }, 600 - .fixup_map_array_48b = { 3 }, 601 - .result = ACCEPT, 602 - .retval = 1, 603 - }, 604 - { 605 - "map access: known scalar += value_ptr", 606 - .insns = { 607 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 608 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 609 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 610 - BPF_LD_MAP_FD(BPF_REG_1, 0), 611 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 612 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 613 - BPF_MOV64_IMM(BPF_REG_1, 4), 614 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 615 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 616 - BPF_MOV64_IMM(BPF_REG_0, 1), 617 - BPF_EXIT_INSN(), 618 - }, 619 - .fixup_map_array_48b = { 3 }, 620 - .result = ACCEPT, 621 - .retval = 1, 622 - }, 623 - { 624 - "map access: value_ptr += known scalar, 1", 625 - .insns = { 626 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 627 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 628 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 629 - BPF_LD_MAP_FD(BPF_REG_1, 0), 630 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 631 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 632 - BPF_MOV64_IMM(BPF_REG_1, 4), 633 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 634 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 635 - BPF_MOV64_IMM(BPF_REG_0, 1), 636 - BPF_EXIT_INSN(), 637 - }, 638 - .fixup_map_array_48b = { 3 }, 639 - .result = ACCEPT, 640 - .retval = 1, 641 - }, 642 - { 643 - "map access: value_ptr += known scalar, 2", 644 - .insns = { 645 - BPF_ST_MEM(BPF_DW, 
BPF_REG_10, -8, 0), 646 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 647 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 648 - BPF_LD_MAP_FD(BPF_REG_1, 0), 649 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 650 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 651 - BPF_MOV64_IMM(BPF_REG_1, 49), 652 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 653 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 654 - BPF_MOV64_IMM(BPF_REG_0, 1), 655 - BPF_EXIT_INSN(), 656 - }, 657 - .fixup_map_array_48b = { 3 }, 658 - .result = REJECT, 659 - .errstr = "invalid access to map value", 660 - }, 661 - { 662 - "map access: value_ptr += known scalar, 3", 663 - .insns = { 664 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 665 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 666 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 667 - BPF_LD_MAP_FD(BPF_REG_1, 0), 668 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 669 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 670 - BPF_MOV64_IMM(BPF_REG_1, -1), 671 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 672 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 673 - BPF_MOV64_IMM(BPF_REG_0, 1), 674 - BPF_EXIT_INSN(), 675 - }, 676 - .fixup_map_array_48b = { 3 }, 677 - .result = REJECT, 678 - .errstr = "invalid access to map value", 679 - }, 680 - { 681 - "map access: value_ptr += known scalar, 4", 682 - .insns = { 683 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 684 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 685 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 686 - BPF_LD_MAP_FD(BPF_REG_1, 0), 687 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 688 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 689 - BPF_MOV64_IMM(BPF_REG_1, 5), 690 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 691 - BPF_MOV64_IMM(BPF_REG_1, -2), 692 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 693 - BPF_MOV64_IMM(BPF_REG_1, -1), 694 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 695 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 696 - BPF_MOV64_IMM(BPF_REG_0, 1), 697 - 
BPF_EXIT_INSN(), 698 - }, 699 - .fixup_map_array_48b = { 3 }, 700 - .result = ACCEPT, 701 - .retval = 1, 702 - }, 703 - { 704 - "map access: value_ptr += known scalar, 5", 705 - .insns = { 706 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 707 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 708 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 709 - BPF_LD_MAP_FD(BPF_REG_1, 0), 710 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 711 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 712 - BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)), 713 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 714 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 715 - BPF_EXIT_INSN(), 716 - }, 717 - .fixup_map_array_48b = { 3 }, 718 - .result = ACCEPT, 719 - .retval = 0xabcdef12, 720 - }, 721 - { 722 - "map access: value_ptr += known scalar, 6", 723 - .insns = { 724 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 725 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 726 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 727 - BPF_LD_MAP_FD(BPF_REG_1, 0), 728 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 729 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 730 - BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)), 731 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 732 - BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)), 733 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 734 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), 735 - BPF_EXIT_INSN(), 736 - }, 737 - .fixup_map_array_48b = { 3 }, 738 - .result = ACCEPT, 739 - .retval = 0xabcdef12, 740 - }, 741 - { 742 - "map access: value_ptr += N, value_ptr -= N known scalar", 743 - .insns = { 744 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 745 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 746 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 747 - BPF_LD_MAP_FD(BPF_REG_1, 0), 748 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 749 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 750 - BPF_MOV32_IMM(BPF_REG_1, 0x12345678), 751 - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 752 - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), 753 - BPF_MOV64_IMM(BPF_REG_1, 2), 754 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 755 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), 756 - BPF_EXIT_INSN(), 757 - }, 758 - .fixup_map_array_48b = { 3 }, 759 - .result = ACCEPT, 760 - .retval = 0x12345678, 761 - }, 762 - { 763 - "map access: unknown scalar += value_ptr, 1", 764 - .insns = { 765 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 766 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 767 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 768 - BPF_LD_MAP_FD(BPF_REG_1, 0), 769 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 770 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 771 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 772 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), 773 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 774 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 775 - BPF_MOV64_IMM(BPF_REG_0, 1), 776 - BPF_EXIT_INSN(), 777 - }, 778 - .fixup_map_array_48b = { 3 }, 779 - .result = ACCEPT, 780 - .retval = 1, 781 - }, 782 - { 783 - "map access: unknown scalar += value_ptr, 2", 784 - .insns = { 785 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 786 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 787 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 788 - BPF_LD_MAP_FD(BPF_REG_1, 0), 789 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 790 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 791 - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 792 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), 793 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 794 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 795 - BPF_EXIT_INSN(), 796 - }, 797 - .fixup_map_array_48b = { 3 }, 798 - .result = ACCEPT, 799 - .retval = 0xabcdef12, 800 - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 801 - }, 802 - { 803 - "map access: unknown scalar += value_ptr, 3", 804 - .insns = { 805 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 806 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 807 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 808 - 
BPF_LD_MAP_FD(BPF_REG_1, 0), 809 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 810 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 811 - BPF_MOV64_IMM(BPF_REG_1, -1), 812 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 813 - BPF_MOV64_IMM(BPF_REG_1, 1), 814 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 815 - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 816 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), 817 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 818 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 819 - BPF_EXIT_INSN(), 820 - }, 821 - .fixup_map_array_48b = { 3 }, 822 - .result = ACCEPT, 823 - .result_unpriv = REJECT, 824 - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", 825 - .retval = 0xabcdef12, 826 - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 827 - }, 828 - { 829 - "map access: unknown scalar += value_ptr, 4", 830 - .insns = { 831 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 832 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 833 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 834 - BPF_LD_MAP_FD(BPF_REG_1, 0), 835 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 836 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 837 - BPF_MOV64_IMM(BPF_REG_1, 19), 838 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 839 - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 840 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), 841 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), 842 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 843 - BPF_EXIT_INSN(), 844 - }, 845 - .fixup_map_array_48b = { 3 }, 846 - .result = REJECT, 847 - .errstr = "R1 max value is outside of the allowed memory range", 848 - .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range", 849 - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 850 - }, 851 - { 852 - "map access: value_ptr += unknown scalar, 1", 853 - .insns = { 854 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 855 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 856 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 857 - 
BPF_LD_MAP_FD(BPF_REG_1, 0), 858 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 859 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 860 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 861 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), 862 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 863 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 864 - BPF_MOV64_IMM(BPF_REG_0, 1), 865 - BPF_EXIT_INSN(), 866 - }, 867 - .fixup_map_array_48b = { 3 }, 868 - .result = ACCEPT, 869 - .retval = 1, 870 - }, 871 - { 872 - "map access: value_ptr += unknown scalar, 2", 873 - .insns = { 874 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 875 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 876 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 877 - BPF_LD_MAP_FD(BPF_REG_1, 0), 878 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 879 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 880 - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 881 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), 882 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 883 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), 884 - BPF_EXIT_INSN(), 885 - }, 886 - .fixup_map_array_48b = { 3 }, 887 - .result = ACCEPT, 888 - .retval = 0xabcdef12, 889 - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 890 - }, 891 - { 892 - "map access: value_ptr += unknown scalar, 3", 893 - .insns = { 894 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 895 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 896 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 897 - BPF_LD_MAP_FD(BPF_REG_1, 0), 898 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 899 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 900 - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 901 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8), 902 - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16), 903 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), 904 - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1), 905 - BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1), 906 - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4), 907 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, 
BPF_REG_3), 908 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 909 - BPF_MOV64_IMM(BPF_REG_0, 1), 910 - BPF_EXIT_INSN(), 911 - BPF_MOV64_IMM(BPF_REG_0, 2), 912 - BPF_JMP_IMM(BPF_JA, 0, 0, -3), 913 - }, 914 - .fixup_map_array_48b = { 3 }, 915 - .result = ACCEPT, 916 - .retval = 1, 917 - }, 918 - { 919 - "map access: value_ptr += value_ptr", 920 - .insns = { 921 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 922 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 923 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 924 - BPF_LD_MAP_FD(BPF_REG_1, 0), 925 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 926 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 927 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0), 928 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 929 - BPF_MOV64_IMM(BPF_REG_0, 1), 930 - BPF_EXIT_INSN(), 931 - }, 932 - .fixup_map_array_48b = { 3 }, 933 - .result = REJECT, 934 - .errstr = "R0 pointer += pointer prohibited", 935 - }, 936 - { 937 - "map access: known scalar -= value_ptr", 938 - .insns = { 939 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 940 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 941 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 942 - BPF_LD_MAP_FD(BPF_REG_1, 0), 943 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 944 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 945 - BPF_MOV64_IMM(BPF_REG_1, 4), 946 - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), 947 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 948 - BPF_MOV64_IMM(BPF_REG_0, 1), 949 - BPF_EXIT_INSN(), 950 - }, 951 - .fixup_map_array_48b = { 3 }, 952 - .result = REJECT, 953 - .errstr = "R1 tried to subtract pointer from scalar", 954 - }, 955 - { 956 - "map access: value_ptr -= known scalar", 957 - .insns = { 958 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 959 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 960 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 961 - BPF_LD_MAP_FD(BPF_REG_1, 0), 962 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 963 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 964 
- BPF_MOV64_IMM(BPF_REG_1, 4), 965 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 966 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 967 - BPF_MOV64_IMM(BPF_REG_0, 1), 968 - BPF_EXIT_INSN(), 969 - }, 970 - .fixup_map_array_48b = { 3 }, 971 - .result = REJECT, 972 - .errstr = "R0 min value is outside of the allowed memory range", 973 - }, 974 - { 975 - "map access: value_ptr -= known scalar, 2", 976 - .insns = { 977 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 978 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 979 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 980 - BPF_LD_MAP_FD(BPF_REG_1, 0), 981 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 982 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 983 - BPF_MOV64_IMM(BPF_REG_1, 6), 984 - BPF_MOV64_IMM(BPF_REG_2, 4), 985 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 986 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), 987 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 988 - BPF_MOV64_IMM(BPF_REG_0, 1), 989 - BPF_EXIT_INSN(), 990 - }, 991 - .fixup_map_array_48b = { 3 }, 992 - .result = ACCEPT, 993 - .retval = 1, 994 - }, 995 - { 996 - "map access: unknown scalar -= value_ptr", 997 - .insns = { 998 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 999 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1000 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1001 - BPF_LD_MAP_FD(BPF_REG_1, 0), 1002 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1003 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 1004 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 1005 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), 1006 - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), 1007 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), 1008 - BPF_MOV64_IMM(BPF_REG_0, 1), 1009 - BPF_EXIT_INSN(), 1010 - }, 1011 - .fixup_map_array_48b = { 3 }, 1012 - .result = REJECT, 1013 - .errstr = "R1 tried to subtract pointer from scalar", 1014 - }, 1015 - { 1016 - "map access: value_ptr -= unknown scalar", 1017 - .insns = { 1018 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1019 - 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1020 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1021 - BPF_LD_MAP_FD(BPF_REG_1, 0), 1022 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1023 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 1024 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 1025 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), 1026 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 1027 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 1028 - BPF_MOV64_IMM(BPF_REG_0, 1), 1029 - BPF_EXIT_INSN(), 1030 - }, 1031 - .fixup_map_array_48b = { 3 }, 1032 - .result = REJECT, 1033 - .errstr = "R0 min value is negative", 1034 - }, 1035 - { 1036 - "map access: value_ptr -= unknown scalar, 2", 1037 - .insns = { 1038 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1039 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1040 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1041 - BPF_LD_MAP_FD(BPF_REG_1, 0), 1042 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1043 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 1044 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 1045 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), 1046 - BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7), 1047 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 1048 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 1049 - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), 1050 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 1051 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 1052 - BPF_MOV64_IMM(BPF_REG_0, 1), 1053 - BPF_EXIT_INSN(), 1054 - }, 1055 - .fixup_map_array_48b = { 3 }, 1056 - .result = ACCEPT, 1057 - .result_unpriv = REJECT, 1058 - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", 1059 - .retval = 1, 1060 - }, 1061 - { 1062 - "map access: value_ptr -= value_ptr", 1063 - .insns = { 1064 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1065 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1066 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1067 - BPF_LD_MAP_FD(BPF_REG_1, 0), 1068 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_map_lookup_elem), 1069 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 1070 - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0), 1071 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 1072 - BPF_MOV64_IMM(BPF_REG_0, 1), 1073 - BPF_EXIT_INSN(), 1074 - }, 1075 - .fixup_map_array_48b = { 3 }, 1076 - .result = REJECT, 1077 - .errstr = "R0 invalid mem access 'scalar'", 1078 - .errstr_unpriv = "R0 pointer -= pointer prohibited", 1079 - }, 1080 - { 1081 - "map access: trying to leak tainted dst reg", 1082 - .insns = { 1083 - BPF_MOV64_IMM(BPF_REG_0, 0), 1084 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1085 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1086 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1087 - BPF_LD_MAP_FD(BPF_REG_1, 0), 1088 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1089 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 1090 - BPF_EXIT_INSN(), 1091 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 1092 - BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF), 1093 - BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), 1094 - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), 1095 - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), 1096 - BPF_MOV64_IMM(BPF_REG_0, 0), 1097 - BPF_EXIT_INSN(), 1098 - }, 1099 - .fixup_map_array_48b = { 4 }, 1100 - .result = REJECT, 1101 - .errstr = "math between map_value pointer and 4294967295 is not allowed", 1102 - }, 1103 - { 1104 - "32bit pkt_ptr -= scalar", 1105 - .insns = { 1106 - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, 1107 - offsetof(struct __sk_buff, data_end)), 1108 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 1109 - offsetof(struct __sk_buff, data)), 1110 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), 1111 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), 1112 - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), 1113 - BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_7), 1114 - BPF_ALU32_REG(BPF_SUB, BPF_REG_6, BPF_REG_4), 1115 - BPF_MOV64_IMM(BPF_REG_0, 0), 1116 - BPF_EXIT_INSN(), 1117 - }, 1118 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1119 - .result = ACCEPT, 1120 - .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1121 - }, 1122 - { 1123 - "32bit scalar -= pkt_ptr", 1124 - .insns = { 1125 - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, 1126 - offsetof(struct __sk_buff, data_end)), 1127 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 1128 - offsetof(struct __sk_buff, data)), 1129 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), 1130 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), 1131 - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), 1132 - BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_6), 1133 - BPF_ALU32_REG(BPF_SUB, BPF_REG_4, BPF_REG_7), 1134 - BPF_MOV64_IMM(BPF_REG_0, 0), 1135 - BPF_EXIT_INSN(), 1136 - }, 1137 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1138 - .result = ACCEPT, 1139 - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1140 - },