Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: verifier/helper_packet_access.c converted to inline assembly

Test verifier/helper_packet_access.c automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230325025524.144043-22-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Eduard Zingerman; committed by Alexei Starovoitov.
fb179fe6 b37d776b

+552 -460
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 18 18 #include "verifier_div0.skel.h" 19 19 #include "verifier_div_overflow.skel.h" 20 20 #include "verifier_helper_access_var_len.skel.h" 21 + #include "verifier_helper_packet_access.skel.h" 21 22 22 23 __maybe_unused 23 24 static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) ··· 59 58 void test_verifier_div0(void) { RUN(verifier_div0); } 60 59 void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } 61 60 void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); } 61 + void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }
+550
tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/helper_packet_access.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + struct { 9 + __uint(type, BPF_MAP_TYPE_HASH); 10 + __uint(max_entries, 1); 11 + __type(key, long long); 12 + __type(value, long long); 13 + } map_hash_8b SEC(".maps"); 14 + 15 + SEC("xdp") 16 + __description("helper access to packet: test1, valid packet_ptr range") 17 + __success __retval(0) 18 + __naked void test1_valid_packet_ptr_range(void) 19 + { 20 + asm volatile (" \ 21 + r2 = *(u32*)(r1 + %[xdp_md_data]); \ 22 + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 23 + r1 = r2; \ 24 + r1 += 8; \ 25 + if r1 > r3 goto l0_%=; \ 26 + r1 = %[map_hash_8b] ll; \ 27 + r3 = r2; \ 28 + r4 = 0; \ 29 + call %[bpf_map_update_elem]; \ 30 + l0_%=: r0 = 0; \ 31 + exit; \ 32 + " : 33 + : __imm(bpf_map_update_elem), 34 + __imm_addr(map_hash_8b), 35 + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 36 + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 37 + : __clobber_all); 38 + } 39 + 40 + SEC("xdp") 41 + __description("helper access to packet: test2, unchecked packet_ptr") 42 + __failure __msg("invalid access to packet") 43 + __naked void packet_test2_unchecked_packet_ptr(void) 44 + { 45 + asm volatile (" \ 46 + r2 = *(u32*)(r1 + %[xdp_md_data]); \ 47 + r1 = %[map_hash_8b] ll; \ 48 + call %[bpf_map_lookup_elem]; \ 49 + r0 = 0; \ 50 + exit; \ 51 + " : 52 + : __imm(bpf_map_lookup_elem), 53 + __imm_addr(map_hash_8b), 54 + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)) 55 + : __clobber_all); 56 + } 57 + 58 + SEC("xdp") 59 + __description("helper access to packet: test3, variable add") 60 + __success __retval(0) 61 + __naked void to_packet_test3_variable_add(void) 62 + { 63 + asm volatile (" \ 64 + r2 = *(u32*)(r1 + %[xdp_md_data]); \ 65 + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 66 + r4 = r2; \ 67 + r4 += 8; \ 68 + if r4 > r3 goto 
l0_%=; \ 69 + r5 = *(u8*)(r2 + 0); \ 70 + r4 = r2; \ 71 + r4 += r5; \ 72 + r5 = r4; \ 73 + r5 += 8; \ 74 + if r5 > r3 goto l0_%=; \ 75 + r1 = %[map_hash_8b] ll; \ 76 + r2 = r4; \ 77 + call %[bpf_map_lookup_elem]; \ 78 + l0_%=: r0 = 0; \ 79 + exit; \ 80 + " : 81 + : __imm(bpf_map_lookup_elem), 82 + __imm_addr(map_hash_8b), 83 + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 84 + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 85 + : __clobber_all); 86 + } 87 + 88 + SEC("xdp") 89 + __description("helper access to packet: test4, packet_ptr with bad range") 90 + __failure __msg("invalid access to packet") 91 + __naked void packet_ptr_with_bad_range_1(void) 92 + { 93 + asm volatile (" \ 94 + r2 = *(u32*)(r1 + %[xdp_md_data]); \ 95 + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 96 + r4 = r2; \ 97 + r4 += 4; \ 98 + if r4 > r3 goto l0_%=; \ 99 + r0 = 0; \ 100 + exit; \ 101 + l0_%=: r1 = %[map_hash_8b] ll; \ 102 + call %[bpf_map_lookup_elem]; \ 103 + r0 = 0; \ 104 + exit; \ 105 + " : 106 + : __imm(bpf_map_lookup_elem), 107 + __imm_addr(map_hash_8b), 108 + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 109 + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 110 + : __clobber_all); 111 + } 112 + 113 + SEC("xdp") 114 + __description("helper access to packet: test5, packet_ptr with too short range") 115 + __failure __msg("invalid access to packet") 116 + __naked void ptr_with_too_short_range_1(void) 117 + { 118 + asm volatile (" \ 119 + r2 = *(u32*)(r1 + %[xdp_md_data]); \ 120 + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 121 + r2 += 1; \ 122 + r4 = r2; \ 123 + r4 += 7; \ 124 + if r4 > r3 goto l0_%=; \ 125 + r1 = %[map_hash_8b] ll; \ 126 + call %[bpf_map_lookup_elem]; \ 127 + l0_%=: r0 = 0; \ 128 + exit; \ 129 + " : 130 + : __imm(bpf_map_lookup_elem), 131 + __imm_addr(map_hash_8b), 132 + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 133 + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 134 + : __clobber_all); 
135 + } 136 + 137 + SEC("tc") 138 + __description("helper access to packet: test6, cls valid packet_ptr range") 139 + __success __retval(0) 140 + __naked void cls_valid_packet_ptr_range(void) 141 + { 142 + asm volatile (" \ 143 + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ 144 + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 145 + r1 = r2; \ 146 + r1 += 8; \ 147 + if r1 > r3 goto l0_%=; \ 148 + r1 = %[map_hash_8b] ll; \ 149 + r3 = r2; \ 150 + r4 = 0; \ 151 + call %[bpf_map_update_elem]; \ 152 + l0_%=: r0 = 0; \ 153 + exit; \ 154 + " : 155 + : __imm(bpf_map_update_elem), 156 + __imm_addr(map_hash_8b), 157 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 158 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 159 + : __clobber_all); 160 + } 161 + 162 + SEC("tc") 163 + __description("helper access to packet: test7, cls unchecked packet_ptr") 164 + __failure __msg("invalid access to packet") 165 + __naked void test7_cls_unchecked_packet_ptr(void) 166 + { 167 + asm volatile (" \ 168 + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ 169 + r1 = %[map_hash_8b] ll; \ 170 + call %[bpf_map_lookup_elem]; \ 171 + r0 = 0; \ 172 + exit; \ 173 + " : 174 + : __imm(bpf_map_lookup_elem), 175 + __imm_addr(map_hash_8b), 176 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)) 177 + : __clobber_all); 178 + } 179 + 180 + SEC("tc") 181 + __description("helper access to packet: test8, cls variable add") 182 + __success __retval(0) 183 + __naked void packet_test8_cls_variable_add(void) 184 + { 185 + asm volatile (" \ 186 + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ 187 + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 188 + r4 = r2; \ 189 + r4 += 8; \ 190 + if r4 > r3 goto l0_%=; \ 191 + r5 = *(u8*)(r2 + 0); \ 192 + r4 = r2; \ 193 + r4 += r5; \ 194 + r5 = r4; \ 195 + r5 += 8; \ 196 + if r5 > r3 goto l0_%=; \ 197 + r1 = %[map_hash_8b] ll; \ 198 + r2 = r4; \ 199 + call %[bpf_map_lookup_elem]; \ 200 + l0_%=: r0 = 0; \ 201 + exit; \ 202 + " : 203 + : 
__imm(bpf_map_lookup_elem), 204 + __imm_addr(map_hash_8b), 205 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 206 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 207 + : __clobber_all); 208 + } 209 + 210 + SEC("tc") 211 + __description("helper access to packet: test9, cls packet_ptr with bad range") 212 + __failure __msg("invalid access to packet") 213 + __naked void packet_ptr_with_bad_range_2(void) 214 + { 215 + asm volatile (" \ 216 + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ 217 + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 218 + r4 = r2; \ 219 + r4 += 4; \ 220 + if r4 > r3 goto l0_%=; \ 221 + r0 = 0; \ 222 + exit; \ 223 + l0_%=: r1 = %[map_hash_8b] ll; \ 224 + call %[bpf_map_lookup_elem]; \ 225 + r0 = 0; \ 226 + exit; \ 227 + " : 228 + : __imm(bpf_map_lookup_elem), 229 + __imm_addr(map_hash_8b), 230 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 231 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 232 + : __clobber_all); 233 + } 234 + 235 + SEC("tc") 236 + __description("helper access to packet: test10, cls packet_ptr with too short range") 237 + __failure __msg("invalid access to packet") 238 + __naked void ptr_with_too_short_range_2(void) 239 + { 240 + asm volatile (" \ 241 + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ 242 + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 243 + r2 += 1; \ 244 + r4 = r2; \ 245 + r4 += 7; \ 246 + if r4 > r3 goto l0_%=; \ 247 + r1 = %[map_hash_8b] ll; \ 248 + call %[bpf_map_lookup_elem]; \ 249 + l0_%=: r0 = 0; \ 250 + exit; \ 251 + " : 252 + : __imm(bpf_map_lookup_elem), 253 + __imm_addr(map_hash_8b), 254 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 255 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 256 + : __clobber_all); 257 + } 258 + 259 + SEC("tc") 260 + __description("helper access to packet: test11, cls unsuitable helper 1") 261 + __failure __msg("helper access to the packet") 262 + __naked void 
test11_cls_unsuitable_helper_1(void) 263 + { 264 + asm volatile (" \ 265 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 266 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 267 + r6 += 1; \ 268 + r3 = r6; \ 269 + r3 += 7; \ 270 + if r3 > r7 goto l0_%=; \ 271 + r2 = 0; \ 272 + r4 = 42; \ 273 + r5 = 0; \ 274 + call %[bpf_skb_store_bytes]; \ 275 + l0_%=: r0 = 0; \ 276 + exit; \ 277 + " : 278 + : __imm(bpf_skb_store_bytes), 279 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 280 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 281 + : __clobber_all); 282 + } 283 + 284 + SEC("tc") 285 + __description("helper access to packet: test12, cls unsuitable helper 2") 286 + __failure __msg("helper access to the packet") 287 + __naked void test12_cls_unsuitable_helper_2(void) 288 + { 289 + asm volatile (" \ 290 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 291 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 292 + r3 = r6; \ 293 + r6 += 8; \ 294 + if r6 > r7 goto l0_%=; \ 295 + r2 = 0; \ 296 + r4 = 4; \ 297 + call %[bpf_skb_load_bytes]; \ 298 + l0_%=: r0 = 0; \ 299 + exit; \ 300 + " : 301 + : __imm(bpf_skb_load_bytes), 302 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 303 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 304 + : __clobber_all); 305 + } 306 + 307 + SEC("tc") 308 + __description("helper access to packet: test13, cls helper ok") 309 + __success __retval(0) 310 + __naked void packet_test13_cls_helper_ok(void) 311 + { 312 + asm volatile (" \ 313 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 314 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 315 + r6 += 1; \ 316 + r1 = r6; \ 317 + r1 += 7; \ 318 + if r1 > r7 goto l0_%=; \ 319 + r1 = r6; \ 320 + r2 = 4; \ 321 + r3 = 0; \ 322 + r4 = 0; \ 323 + r5 = 0; \ 324 + call %[bpf_csum_diff]; \ 325 + l0_%=: r0 = 0; \ 326 + exit; \ 327 + " : 328 + : __imm(bpf_csum_diff), 329 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 330 + 
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 331 + : __clobber_all); 332 + } 333 + 334 + SEC("tc") 335 + __description("helper access to packet: test14, cls helper ok sub") 336 + __success __retval(0) 337 + __naked void test14_cls_helper_ok_sub(void) 338 + { 339 + asm volatile (" \ 340 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 341 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 342 + r6 += 1; \ 343 + r1 = r6; \ 344 + r1 += 7; \ 345 + if r1 > r7 goto l0_%=; \ 346 + r1 -= 4; \ 347 + r2 = 4; \ 348 + r3 = 0; \ 349 + r4 = 0; \ 350 + r5 = 0; \ 351 + call %[bpf_csum_diff]; \ 352 + l0_%=: r0 = 0; \ 353 + exit; \ 354 + " : 355 + : __imm(bpf_csum_diff), 356 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 357 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 358 + : __clobber_all); 359 + } 360 + 361 + SEC("tc") 362 + __description("helper access to packet: test15, cls helper fail sub") 363 + __failure __msg("invalid access to packet") 364 + __naked void test15_cls_helper_fail_sub(void) 365 + { 366 + asm volatile (" \ 367 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 368 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 369 + r6 += 1; \ 370 + r1 = r6; \ 371 + r1 += 7; \ 372 + if r1 > r7 goto l0_%=; \ 373 + r1 -= 12; \ 374 + r2 = 4; \ 375 + r3 = 0; \ 376 + r4 = 0; \ 377 + r5 = 0; \ 378 + call %[bpf_csum_diff]; \ 379 + l0_%=: r0 = 0; \ 380 + exit; \ 381 + " : 382 + : __imm(bpf_csum_diff), 383 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 384 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 385 + : __clobber_all); 386 + } 387 + 388 + SEC("tc") 389 + __description("helper access to packet: test16, cls helper fail range 1") 390 + __failure __msg("invalid access to packet") 391 + __naked void cls_helper_fail_range_1(void) 392 + { 393 + asm volatile (" \ 394 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 395 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 396 + r6 += 1; \ 397 + r1 = r6; \ 398 + 
r1 += 7; \ 399 + if r1 > r7 goto l0_%=; \ 400 + r1 = r6; \ 401 + r2 = 8; \ 402 + r3 = 0; \ 403 + r4 = 0; \ 404 + r5 = 0; \ 405 + call %[bpf_csum_diff]; \ 406 + l0_%=: r0 = 0; \ 407 + exit; \ 408 + " : 409 + : __imm(bpf_csum_diff), 410 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 411 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 412 + : __clobber_all); 413 + } 414 + 415 + SEC("tc") 416 + __description("helper access to packet: test17, cls helper fail range 2") 417 + __failure __msg("R2 min value is negative") 418 + __naked void cls_helper_fail_range_2(void) 419 + { 420 + asm volatile (" \ 421 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 422 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 423 + r6 += 1; \ 424 + r1 = r6; \ 425 + r1 += 7; \ 426 + if r1 > r7 goto l0_%=; \ 427 + r1 = r6; \ 428 + r2 = -9; \ 429 + r3 = 0; \ 430 + r4 = 0; \ 431 + r5 = 0; \ 432 + call %[bpf_csum_diff]; \ 433 + l0_%=: r0 = 0; \ 434 + exit; \ 435 + " : 436 + : __imm(bpf_csum_diff), 437 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 438 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 439 + : __clobber_all); 440 + } 441 + 442 + SEC("tc") 443 + __description("helper access to packet: test18, cls helper fail range 3") 444 + __failure __msg("R2 min value is negative") 445 + __naked void cls_helper_fail_range_3(void) 446 + { 447 + asm volatile (" \ 448 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 449 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 450 + r6 += 1; \ 451 + r1 = r6; \ 452 + r1 += 7; \ 453 + if r1 > r7 goto l0_%=; \ 454 + r1 = r6; \ 455 + r2 = %[__imm_0]; \ 456 + r3 = 0; \ 457 + r4 = 0; \ 458 + r5 = 0; \ 459 + call %[bpf_csum_diff]; \ 460 + l0_%=: r0 = 0; \ 461 + exit; \ 462 + " : 463 + : __imm(bpf_csum_diff), 464 + __imm_const(__imm_0, ~0), 465 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 466 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 467 + : __clobber_all); 468 
+ } 469 + 470 + SEC("tc") 471 + __description("helper access to packet: test19, cls helper range zero") 472 + __success __retval(0) 473 + __naked void test19_cls_helper_range_zero(void) 474 + { 475 + asm volatile (" \ 476 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 477 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 478 + r6 += 1; \ 479 + r1 = r6; \ 480 + r1 += 7; \ 481 + if r1 > r7 goto l0_%=; \ 482 + r1 = r6; \ 483 + r2 = 0; \ 484 + r3 = 0; \ 485 + r4 = 0; \ 486 + r5 = 0; \ 487 + call %[bpf_csum_diff]; \ 488 + l0_%=: r0 = 0; \ 489 + exit; \ 490 + " : 491 + : __imm(bpf_csum_diff), 492 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 493 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 494 + : __clobber_all); 495 + } 496 + 497 + SEC("tc") 498 + __description("helper access to packet: test20, pkt end as input") 499 + __failure __msg("R1 type=pkt_end expected=fp") 500 + __naked void test20_pkt_end_as_input(void) 501 + { 502 + asm volatile (" \ 503 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 504 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 505 + r6 += 1; \ 506 + r1 = r6; \ 507 + r1 += 7; \ 508 + if r1 > r7 goto l0_%=; \ 509 + r1 = r7; \ 510 + r2 = 4; \ 511 + r3 = 0; \ 512 + r4 = 0; \ 513 + r5 = 0; \ 514 + call %[bpf_csum_diff]; \ 515 + l0_%=: r0 = 0; \ 516 + exit; \ 517 + " : 518 + : __imm(bpf_csum_diff), 519 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 520 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 521 + : __clobber_all); 522 + } 523 + 524 + SEC("tc") 525 + __description("helper access to packet: test21, wrong reg") 526 + __failure __msg("invalid access to packet") 527 + __naked void to_packet_test21_wrong_reg(void) 528 + { 529 + asm volatile (" \ 530 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 531 + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 532 + r6 += 1; \ 533 + r1 = r6; \ 534 + r1 += 7; \ 535 + if r1 > r7 goto l0_%=; \ 536 + r2 = 4; \ 537 + r3 = 0; \ 538 + r4 = 0; \ 539 + r5 = 0; \ 
540 + call %[bpf_csum_diff]; \ 541 + r0 = 0; \ 542 + l0_%=: exit; \ 543 + " : 544 + : __imm(bpf_csum_diff), 545 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 546 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 547 + : __clobber_all); 548 + } 549 + 550 + char _license[] SEC("license") = "GPL";
-460
tools/testing/selftests/bpf/verifier/helper_packet_access.c
··· 1 - { 2 - "helper access to packet: test1, valid packet_ptr range", 3 - .insns = { 4 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), 5 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6 - offsetof(struct xdp_md, data_end)), 7 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 8 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 9 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), 10 - BPF_LD_MAP_FD(BPF_REG_1, 0), 11 - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 12 - BPF_MOV64_IMM(BPF_REG_4, 0), 13 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), 14 - BPF_MOV64_IMM(BPF_REG_0, 0), 15 - BPF_EXIT_INSN(), 16 - }, 17 - .fixup_map_hash_8b = { 5 }, 18 - .result_unpriv = ACCEPT, 19 - .result = ACCEPT, 20 - .prog_type = BPF_PROG_TYPE_XDP, 21 - }, 22 - { 23 - "helper access to packet: test2, unchecked packet_ptr", 24 - .insns = { 25 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), 26 - BPF_LD_MAP_FD(BPF_REG_1, 0), 27 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 28 - BPF_MOV64_IMM(BPF_REG_0, 0), 29 - BPF_EXIT_INSN(), 30 - }, 31 - .fixup_map_hash_8b = { 1 }, 32 - .result = REJECT, 33 - .errstr = "invalid access to packet", 34 - .prog_type = BPF_PROG_TYPE_XDP, 35 - }, 36 - { 37 - "helper access to packet: test3, variable add", 38 - .insns = { 39 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), 40 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 41 - offsetof(struct xdp_md, data_end)), 42 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 43 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 44 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), 45 - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), 46 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 47 - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), 48 - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 49 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), 50 - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), 51 - BPF_LD_MAP_FD(BPF_REG_1, 0), 52 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), 53 - 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 54 - BPF_MOV64_IMM(BPF_REG_0, 0), 55 - BPF_EXIT_INSN(), 56 - }, 57 - .fixup_map_hash_8b = { 11 }, 58 - .result = ACCEPT, 59 - .prog_type = BPF_PROG_TYPE_XDP, 60 - }, 61 - { 62 - "helper access to packet: test4, packet_ptr with bad range", 63 - .insns = { 64 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), 65 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 66 - offsetof(struct xdp_md, data_end)), 67 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 68 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 69 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), 70 - BPF_MOV64_IMM(BPF_REG_0, 0), 71 - BPF_EXIT_INSN(), 72 - BPF_LD_MAP_FD(BPF_REG_1, 0), 73 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 74 - BPF_MOV64_IMM(BPF_REG_0, 0), 75 - BPF_EXIT_INSN(), 76 - }, 77 - .fixup_map_hash_8b = { 7 }, 78 - .result = REJECT, 79 - .errstr = "invalid access to packet", 80 - .prog_type = BPF_PROG_TYPE_XDP, 81 - }, 82 - { 83 - "helper access to packet: test5, packet_ptr with too short range", 84 - .insns = { 85 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), 86 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 87 - offsetof(struct xdp_md, data_end)), 88 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 89 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 90 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), 91 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), 92 - BPF_LD_MAP_FD(BPF_REG_1, 0), 93 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 94 - BPF_MOV64_IMM(BPF_REG_0, 0), 95 - BPF_EXIT_INSN(), 96 - }, 97 - .fixup_map_hash_8b = { 6 }, 98 - .result = REJECT, 99 - .errstr = "invalid access to packet", 100 - .prog_type = BPF_PROG_TYPE_XDP, 101 - }, 102 - { 103 - "helper access to packet: test6, cls valid packet_ptr range", 104 - .insns = { 105 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 106 - offsetof(struct __sk_buff, data)), 107 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 108 - 
offsetof(struct __sk_buff, data_end)), 109 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 110 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 111 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), 112 - BPF_LD_MAP_FD(BPF_REG_1, 0), 113 - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 114 - BPF_MOV64_IMM(BPF_REG_4, 0), 115 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), 116 - BPF_MOV64_IMM(BPF_REG_0, 0), 117 - BPF_EXIT_INSN(), 118 - }, 119 - .fixup_map_hash_8b = { 5 }, 120 - .result = ACCEPT, 121 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 122 - }, 123 - { 124 - "helper access to packet: test7, cls unchecked packet_ptr", 125 - .insns = { 126 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 127 - offsetof(struct __sk_buff, data)), 128 - BPF_LD_MAP_FD(BPF_REG_1, 0), 129 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 130 - BPF_MOV64_IMM(BPF_REG_0, 0), 131 - BPF_EXIT_INSN(), 132 - }, 133 - .fixup_map_hash_8b = { 1 }, 134 - .result = REJECT, 135 - .errstr = "invalid access to packet", 136 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 137 - }, 138 - { 139 - "helper access to packet: test8, cls variable add", 140 - .insns = { 141 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 142 - offsetof(struct __sk_buff, data)), 143 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 144 - offsetof(struct __sk_buff, data_end)), 145 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 146 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 147 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), 148 - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), 149 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 150 - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), 151 - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 152 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), 153 - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), 154 - BPF_LD_MAP_FD(BPF_REG_1, 0), 155 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), 156 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 157 - BPF_MOV64_IMM(BPF_REG_0, 0), 158 - BPF_EXIT_INSN(), 159 - }, 160 - .fixup_map_hash_8b = { 11 
}, 161 - .result = ACCEPT, 162 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 163 - }, 164 - { 165 - "helper access to packet: test9, cls packet_ptr with bad range", 166 - .insns = { 167 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 168 - offsetof(struct __sk_buff, data)), 169 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 170 - offsetof(struct __sk_buff, data_end)), 171 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 172 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 173 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), 174 - BPF_MOV64_IMM(BPF_REG_0, 0), 175 - BPF_EXIT_INSN(), 176 - BPF_LD_MAP_FD(BPF_REG_1, 0), 177 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 178 - BPF_MOV64_IMM(BPF_REG_0, 0), 179 - BPF_EXIT_INSN(), 180 - }, 181 - .fixup_map_hash_8b = { 7 }, 182 - .result = REJECT, 183 - .errstr = "invalid access to packet", 184 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 185 - }, 186 - { 187 - "helper access to packet: test10, cls packet_ptr with too short range", 188 - .insns = { 189 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 190 - offsetof(struct __sk_buff, data)), 191 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 192 - offsetof(struct __sk_buff, data_end)), 193 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 194 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 195 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), 196 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), 197 - BPF_LD_MAP_FD(BPF_REG_1, 0), 198 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 199 - BPF_MOV64_IMM(BPF_REG_0, 0), 200 - BPF_EXIT_INSN(), 201 - }, 202 - .fixup_map_hash_8b = { 6 }, 203 - .result = REJECT, 204 - .errstr = "invalid access to packet", 205 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 206 - }, 207 - { 208 - "helper access to packet: test11, cls unsuitable helper 1", 209 - .insns = { 210 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 211 - offsetof(struct __sk_buff, data)), 212 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 213 - offsetof(struct __sk_buff, data_end)), 214 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 
215 - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 216 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7), 217 - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4), 218 - BPF_MOV64_IMM(BPF_REG_2, 0), 219 - BPF_MOV64_IMM(BPF_REG_4, 42), 220 - BPF_MOV64_IMM(BPF_REG_5, 0), 221 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes), 222 - BPF_MOV64_IMM(BPF_REG_0, 0), 223 - BPF_EXIT_INSN(), 224 - }, 225 - .result = REJECT, 226 - .errstr = "helper access to the packet", 227 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 228 - }, 229 - { 230 - "helper access to packet: test12, cls unsuitable helper 2", 231 - .insns = { 232 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 233 - offsetof(struct __sk_buff, data)), 234 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 235 - offsetof(struct __sk_buff, data_end)), 236 - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 237 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), 238 - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3), 239 - BPF_MOV64_IMM(BPF_REG_2, 0), 240 - BPF_MOV64_IMM(BPF_REG_4, 4), 241 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 242 - BPF_MOV64_IMM(BPF_REG_0, 0), 243 - BPF_EXIT_INSN(), 244 - }, 245 - .result = REJECT, 246 - .errstr = "helper access to the packet", 247 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 248 - }, 249 - { 250 - "helper access to packet: test13, cls helper ok", 251 - .insns = { 252 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 253 - offsetof(struct __sk_buff, data)), 254 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 255 - offsetof(struct __sk_buff, data_end)), 256 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 257 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 258 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 259 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 260 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 261 - BPF_MOV64_IMM(BPF_REG_2, 4), 262 - BPF_MOV64_IMM(BPF_REG_3, 0), 263 - BPF_MOV64_IMM(BPF_REG_4, 0), 264 - BPF_MOV64_IMM(BPF_REG_5, 0), 265 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 266 - BPF_MOV64_IMM(BPF_REG_0, 0), 267 - 
BPF_EXIT_INSN(), 268 - }, 269 - .result = ACCEPT, 270 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 271 - }, 272 - { 273 - "helper access to packet: test14, cls helper ok sub", 274 - .insns = { 275 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 276 - offsetof(struct __sk_buff, data)), 277 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 278 - offsetof(struct __sk_buff, data_end)), 279 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 280 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 281 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 282 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 283 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4), 284 - BPF_MOV64_IMM(BPF_REG_2, 4), 285 - BPF_MOV64_IMM(BPF_REG_3, 0), 286 - BPF_MOV64_IMM(BPF_REG_4, 0), 287 - BPF_MOV64_IMM(BPF_REG_5, 0), 288 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 289 - BPF_MOV64_IMM(BPF_REG_0, 0), 290 - BPF_EXIT_INSN(), 291 - }, 292 - .result = ACCEPT, 293 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 294 - }, 295 - { 296 - "helper access to packet: test15, cls helper fail sub", 297 - .insns = { 298 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 299 - offsetof(struct __sk_buff, data)), 300 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 301 - offsetof(struct __sk_buff, data_end)), 302 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 303 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 304 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 305 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 306 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12), 307 - BPF_MOV64_IMM(BPF_REG_2, 4), 308 - BPF_MOV64_IMM(BPF_REG_3, 0), 309 - BPF_MOV64_IMM(BPF_REG_4, 0), 310 - BPF_MOV64_IMM(BPF_REG_5, 0), 311 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 312 - BPF_MOV64_IMM(BPF_REG_0, 0), 313 - BPF_EXIT_INSN(), 314 - }, 315 - .result = REJECT, 316 - .errstr = "invalid access to packet", 317 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 318 - }, 319 - { 320 - "helper access to packet: test16, cls helper fail range 1", 321 - .insns = { 322 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 323 - 
offsetof(struct __sk_buff, data)), 324 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 325 - offsetof(struct __sk_buff, data_end)), 326 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 327 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 328 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 329 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 330 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 331 - BPF_MOV64_IMM(BPF_REG_2, 8), 332 - BPF_MOV64_IMM(BPF_REG_3, 0), 333 - BPF_MOV64_IMM(BPF_REG_4, 0), 334 - BPF_MOV64_IMM(BPF_REG_5, 0), 335 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 336 - BPF_MOV64_IMM(BPF_REG_0, 0), 337 - BPF_EXIT_INSN(), 338 - }, 339 - .result = REJECT, 340 - .errstr = "invalid access to packet", 341 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 342 - }, 343 - { 344 - "helper access to packet: test17, cls helper fail range 2", 345 - .insns = { 346 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 347 - offsetof(struct __sk_buff, data)), 348 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 349 - offsetof(struct __sk_buff, data_end)), 350 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 351 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 352 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 353 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 354 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 355 - BPF_MOV64_IMM(BPF_REG_2, -9), 356 - BPF_MOV64_IMM(BPF_REG_3, 0), 357 - BPF_MOV64_IMM(BPF_REG_4, 0), 358 - BPF_MOV64_IMM(BPF_REG_5, 0), 359 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 360 - BPF_MOV64_IMM(BPF_REG_0, 0), 361 - BPF_EXIT_INSN(), 362 - }, 363 - .result = REJECT, 364 - .errstr = "R2 min value is negative", 365 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 366 - }, 367 - { 368 - "helper access to packet: test18, cls helper fail range 3", 369 - .insns = { 370 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 371 - offsetof(struct __sk_buff, data)), 372 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 373 - offsetof(struct __sk_buff, data_end)), 374 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 375 - BPF_MOV64_REG(BPF_REG_1, 
BPF_REG_6), 376 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 377 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 378 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 379 - BPF_MOV64_IMM(BPF_REG_2, ~0), 380 - BPF_MOV64_IMM(BPF_REG_3, 0), 381 - BPF_MOV64_IMM(BPF_REG_4, 0), 382 - BPF_MOV64_IMM(BPF_REG_5, 0), 383 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 384 - BPF_MOV64_IMM(BPF_REG_0, 0), 385 - BPF_EXIT_INSN(), 386 - }, 387 - .result = REJECT, 388 - .errstr = "R2 min value is negative", 389 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 390 - }, 391 - { 392 - "helper access to packet: test19, cls helper range zero", 393 - .insns = { 394 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 395 - offsetof(struct __sk_buff, data)), 396 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 397 - offsetof(struct __sk_buff, data_end)), 398 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 399 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 400 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 401 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 402 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 403 - BPF_MOV64_IMM(BPF_REG_2, 0), 404 - BPF_MOV64_IMM(BPF_REG_3, 0), 405 - BPF_MOV64_IMM(BPF_REG_4, 0), 406 - BPF_MOV64_IMM(BPF_REG_5, 0), 407 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 408 - BPF_MOV64_IMM(BPF_REG_0, 0), 409 - BPF_EXIT_INSN(), 410 - }, 411 - .result = ACCEPT, 412 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 413 - }, 414 - { 415 - "helper access to packet: test20, pkt end as input", 416 - .insns = { 417 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 418 - offsetof(struct __sk_buff, data)), 419 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 420 - offsetof(struct __sk_buff, data_end)), 421 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 422 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 423 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 424 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 425 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 426 - BPF_MOV64_IMM(BPF_REG_2, 4), 427 - BPF_MOV64_IMM(BPF_REG_3, 0), 428 - BPF_MOV64_IMM(BPF_REG_4, 0), 429 
- BPF_MOV64_IMM(BPF_REG_5, 0), 430 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 431 - BPF_MOV64_IMM(BPF_REG_0, 0), 432 - BPF_EXIT_INSN(), 433 - }, 434 - .result = REJECT, 435 - .errstr = "R1 type=pkt_end expected=fp", 436 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 437 - }, 438 - { 439 - "helper access to packet: test21, wrong reg", 440 - .insns = { 441 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 442 - offsetof(struct __sk_buff, data)), 443 - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 444 - offsetof(struct __sk_buff, data_end)), 445 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 446 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 447 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 448 - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 449 - BPF_MOV64_IMM(BPF_REG_2, 4), 450 - BPF_MOV64_IMM(BPF_REG_3, 0), 451 - BPF_MOV64_IMM(BPF_REG_4, 0), 452 - BPF_MOV64_IMM(BPF_REG_5, 0), 453 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 454 - BPF_MOV64_IMM(BPF_REG_0, 0), 455 - BPF_EXIT_INSN(), 456 - }, 457 - .result = REJECT, 458 - .errstr = "invalid access to packet", 459 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 460 - },