Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: verifier/sock converted to inline assembly

Test verifier/sock automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230421174234.2391278-20-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Eduard Zingerman and committed by
Alexei Starovoitov
426fc0e3 034d9ad2

+982 -706
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 50 50 #include "verifier_ringbuf.skel.h" 51 51 #include "verifier_runtime_jit.skel.h" 52 52 #include "verifier_search_pruning.skel.h" 53 + #include "verifier_sock.skel.h" 53 54 #include "verifier_spill_fill.skel.h" 54 55 #include "verifier_stack_ptr.skel.h" 55 56 #include "verifier_uninit.skel.h" ··· 142 141 void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } 143 142 void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } 144 143 void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); } 144 + void test_verifier_sock(void) { RUN(verifier_sock); } 145 145 void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } 146 146 void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } 147 147 void test_verifier_uninit(void) { RUN(verifier_uninit); }
+980
tools/testing/selftests/bpf/progs/verifier_sock.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/sock.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) 9 + #define offsetofend(TYPE, MEMBER) \ 10 + (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) 11 + 12 + struct { 13 + __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); 14 + __uint(max_entries, 1); 15 + __type(key, __u32); 16 + __type(value, __u64); 17 + } map_reuseport_array SEC(".maps"); 18 + 19 + struct { 20 + __uint(type, BPF_MAP_TYPE_SOCKHASH); 21 + __uint(max_entries, 1); 22 + __type(key, int); 23 + __type(value, int); 24 + } map_sockhash SEC(".maps"); 25 + 26 + struct { 27 + __uint(type, BPF_MAP_TYPE_SOCKMAP); 28 + __uint(max_entries, 1); 29 + __type(key, int); 30 + __type(value, int); 31 + } map_sockmap SEC(".maps"); 32 + 33 + struct { 34 + __uint(type, BPF_MAP_TYPE_XSKMAP); 35 + __uint(max_entries, 1); 36 + __type(key, int); 37 + __type(value, int); 38 + } map_xskmap SEC(".maps"); 39 + 40 + struct val { 41 + int cnt; 42 + struct bpf_spin_lock l; 43 + }; 44 + 45 + struct { 46 + __uint(type, BPF_MAP_TYPE_SK_STORAGE); 47 + __uint(max_entries, 0); 48 + __type(key, int); 49 + __type(value, struct val); 50 + __uint(map_flags, BPF_F_NO_PREALLOC); 51 + } sk_storage_map SEC(".maps"); 52 + 53 + SEC("cgroup/skb") 54 + __description("skb->sk: no NULL check") 55 + __failure __msg("invalid mem access 'sock_common_or_null'") 56 + __failure_unpriv 57 + __naked void skb_sk_no_null_check(void) 58 + { 59 + asm volatile (" \ 60 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 61 + r0 = *(u32*)(r1 + 0); \ 62 + r0 = 0; \ 63 + exit; \ 64 + " : 65 + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 66 + : __clobber_all); 67 + } 68 + 69 + SEC("cgroup/skb") 70 + __description("skb->sk: sk->family [non fullsock field]") 71 + __success __success_unpriv __retval(0) 72 + __naked void 
sk_family_non_fullsock_field_1(void) 73 + { 74 + asm volatile (" \ 75 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 76 + if r1 != 0 goto l0_%=; \ 77 + r0 = 0; \ 78 + exit; \ 79 + l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \ 80 + r0 = 0; \ 81 + exit; \ 82 + " : 83 + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 84 + __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) 85 + : __clobber_all); 86 + } 87 + 88 + SEC("cgroup/skb") 89 + __description("skb->sk: sk->type [fullsock field]") 90 + __failure __msg("invalid sock_common access") 91 + __failure_unpriv 92 + __naked void sk_sk_type_fullsock_field_1(void) 93 + { 94 + asm volatile (" \ 95 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 96 + if r1 != 0 goto l0_%=; \ 97 + r0 = 0; \ 98 + exit; \ 99 + l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_type]); \ 100 + r0 = 0; \ 101 + exit; \ 102 + " : 103 + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 104 + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 105 + : __clobber_all); 106 + } 107 + 108 + SEC("cgroup/skb") 109 + __description("bpf_sk_fullsock(skb->sk): no !skb->sk check") 110 + __failure __msg("type=sock_common_or_null expected=sock_common") 111 + __failure_unpriv 112 + __naked void sk_no_skb_sk_check_1(void) 113 + { 114 + asm volatile (" \ 115 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 116 + call %[bpf_sk_fullsock]; \ 117 + r0 = 0; \ 118 + exit; \ 119 + " : 120 + : __imm(bpf_sk_fullsock), 121 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 122 + : __clobber_all); 123 + } 124 + 125 + SEC("cgroup/skb") 126 + __description("sk_fullsock(skb->sk): no NULL check on ret") 127 + __failure __msg("invalid mem access 'sock_or_null'") 128 + __failure_unpriv 129 + __naked void no_null_check_on_ret_1(void) 130 + { 131 + asm volatile (" \ 132 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 133 + if r1 != 0 goto l0_%=; \ 134 + r0 = 0; \ 135 + exit; \ 136 + l0_%=: call %[bpf_sk_fullsock]; \ 137 + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ 138 + r0 = 0; \ 
139 + exit; \ 140 + " : 141 + : __imm(bpf_sk_fullsock), 142 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 143 + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 144 + : __clobber_all); 145 + } 146 + 147 + SEC("cgroup/skb") 148 + __description("sk_fullsock(skb->sk): sk->type [fullsock field]") 149 + __success __success_unpriv __retval(0) 150 + __naked void sk_sk_type_fullsock_field_2(void) 151 + { 152 + asm volatile (" \ 153 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 154 + if r1 != 0 goto l0_%=; \ 155 + r0 = 0; \ 156 + exit; \ 157 + l0_%=: call %[bpf_sk_fullsock]; \ 158 + if r0 != 0 goto l1_%=; \ 159 + r0 = 0; \ 160 + exit; \ 161 + l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \ 162 + r0 = 0; \ 163 + exit; \ 164 + " : 165 + : __imm(bpf_sk_fullsock), 166 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 167 + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 168 + : __clobber_all); 169 + } 170 + 171 + SEC("cgroup/skb") 172 + __description("sk_fullsock(skb->sk): sk->family [non fullsock field]") 173 + __success __success_unpriv __retval(0) 174 + __naked void sk_family_non_fullsock_field_2(void) 175 + { 176 + asm volatile (" \ 177 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 178 + if r1 != 0 goto l0_%=; \ 179 + r0 = 0; \ 180 + exit; \ 181 + l0_%=: call %[bpf_sk_fullsock]; \ 182 + if r0 != 0 goto l1_%=; \ 183 + exit; \ 184 + l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \ 185 + r0 = 0; \ 186 + exit; \ 187 + " : 188 + : __imm(bpf_sk_fullsock), 189 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 190 + __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) 191 + : __clobber_all); 192 + } 193 + 194 + SEC("cgroup/skb") 195 + __description("sk_fullsock(skb->sk): sk->state [narrow load]") 196 + __success __success_unpriv __retval(0) 197 + __naked void sk_sk_state_narrow_load(void) 198 + { 199 + asm volatile (" \ 200 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 201 + if r1 != 0 goto l0_%=; \ 202 + r0 = 0; \ 203 + exit; \ 204 
+ l0_%=: call %[bpf_sk_fullsock]; \ 205 + if r0 != 0 goto l1_%=; \ 206 + r0 = 0; \ 207 + exit; \ 208 + l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \ 209 + r0 = 0; \ 210 + exit; \ 211 + " : 212 + : __imm(bpf_sk_fullsock), 213 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 214 + __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state)) 215 + : __clobber_all); 216 + } 217 + 218 + SEC("cgroup/skb") 219 + __description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)") 220 + __success __success_unpriv __retval(0) 221 + __naked void port_word_load_backward_compatibility(void) 222 + { 223 + asm volatile (" \ 224 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 225 + if r1 != 0 goto l0_%=; \ 226 + r0 = 0; \ 227 + exit; \ 228 + l0_%=: call %[bpf_sk_fullsock]; \ 229 + if r0 != 0 goto l1_%=; \ 230 + r0 = 0; \ 231 + exit; \ 232 + l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \ 233 + r0 = 0; \ 234 + exit; \ 235 + " : 236 + : __imm(bpf_sk_fullsock), 237 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 238 + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) 239 + : __clobber_all); 240 + } 241 + 242 + SEC("cgroup/skb") 243 + __description("sk_fullsock(skb->sk): sk->dst_port [half load]") 244 + __success __success_unpriv __retval(0) 245 + __naked void sk_dst_port_half_load(void) 246 + { 247 + asm volatile (" \ 248 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 249 + if r1 != 0 goto l0_%=; \ 250 + r0 = 0; \ 251 + exit; \ 252 + l0_%=: call %[bpf_sk_fullsock]; \ 253 + if r0 != 0 goto l1_%=; \ 254 + r0 = 0; \ 255 + exit; \ 256 + l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \ 257 + r0 = 0; \ 258 + exit; \ 259 + " : 260 + : __imm(bpf_sk_fullsock), 261 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 262 + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) 263 + : __clobber_all); 264 + } 265 + 266 + SEC("cgroup/skb") 267 + __description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)") 268 
+ __failure __msg("invalid sock access") 269 + __failure_unpriv 270 + __naked void dst_port_half_load_invalid_1(void) 271 + { 272 + asm volatile (" \ 273 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 274 + if r1 != 0 goto l0_%=; \ 275 + r0 = 0; \ 276 + exit; \ 277 + l0_%=: call %[bpf_sk_fullsock]; \ 278 + if r0 != 0 goto l1_%=; \ 279 + r0 = 0; \ 280 + exit; \ 281 + l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \ 282 + r0 = 0; \ 283 + exit; \ 284 + " : 285 + : __imm(bpf_sk_fullsock), 286 + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), 287 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 288 + : __clobber_all); 289 + } 290 + 291 + SEC("cgroup/skb") 292 + __description("sk_fullsock(skb->sk): sk->dst_port [byte load]") 293 + __success __success_unpriv __retval(0) 294 + __naked void sk_dst_port_byte_load(void) 295 + { 296 + asm volatile (" \ 297 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 298 + if r1 != 0 goto l0_%=; \ 299 + r0 = 0; \ 300 + exit; \ 301 + l0_%=: call %[bpf_sk_fullsock]; \ 302 + if r0 != 0 goto l1_%=; \ 303 + r0 = 0; \ 304 + exit; \ 305 + l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \ 306 + r2 = *(u8*)(r0 + %[__imm_0]); \ 307 + r0 = 0; \ 308 + exit; \ 309 + " : 310 + : __imm(bpf_sk_fullsock), 311 + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1), 312 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 313 + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) 314 + : __clobber_all); 315 + } 316 + 317 + SEC("cgroup/skb") 318 + __description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)") 319 + __failure __msg("invalid sock access") 320 + __failure_unpriv 321 + __naked void dst_port_byte_load_invalid(void) 322 + { 323 + asm volatile (" \ 324 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 325 + if r1 != 0 goto l0_%=; \ 326 + r0 = 0; \ 327 + exit; \ 328 + l0_%=: call %[bpf_sk_fullsock]; \ 329 + if r0 != 0 goto l1_%=; \ 330 + r0 = 0; \ 331 + exit; \ 332 + l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ 333 + r0 = 
0; \ 334 + exit; \ 335 + " : 336 + : __imm(bpf_sk_fullsock), 337 + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), 338 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 339 + : __clobber_all); 340 + } 341 + 342 + SEC("cgroup/skb") 343 + __description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)") 344 + __failure __msg("invalid sock access") 345 + __failure_unpriv 346 + __naked void dst_port_half_load_invalid_2(void) 347 + { 348 + asm volatile (" \ 349 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 350 + if r1 != 0 goto l0_%=; \ 351 + r0 = 0; \ 352 + exit; \ 353 + l0_%=: call %[bpf_sk_fullsock]; \ 354 + if r0 != 0 goto l1_%=; \ 355 + r0 = 0; \ 356 + exit; \ 357 + l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \ 358 + r0 = 0; \ 359 + exit; \ 360 + " : 361 + : __imm(bpf_sk_fullsock), 362 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 363 + __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port)) 364 + : __clobber_all); 365 + } 366 + 367 + SEC("cgroup/skb") 368 + __description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]") 369 + __success __success_unpriv __retval(0) 370 + __naked void dst_ip6_load_2nd_byte(void) 371 + { 372 + asm volatile (" \ 373 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 374 + if r1 != 0 goto l0_%=; \ 375 + r0 = 0; \ 376 + exit; \ 377 + l0_%=: call %[bpf_sk_fullsock]; \ 378 + if r0 != 0 goto l1_%=; \ 379 + r0 = 0; \ 380 + exit; \ 381 + l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ 382 + r0 = 0; \ 383 + exit; \ 384 + " : 385 + : __imm(bpf_sk_fullsock), 386 + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1), 387 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 388 + : __clobber_all); 389 + } 390 + 391 + SEC("cgroup/skb") 392 + __description("sk_fullsock(skb->sk): sk->type [narrow load]") 393 + __success __success_unpriv __retval(0) 394 + __naked void sk_sk_type_narrow_load(void) 395 + { 396 + asm volatile (" \ 397 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 
398 + if r1 != 0 goto l0_%=; \ 399 + r0 = 0; \ 400 + exit; \ 401 + l0_%=: call %[bpf_sk_fullsock]; \ 402 + if r0 != 0 goto l1_%=; \ 403 + r0 = 0; \ 404 + exit; \ 405 + l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \ 406 + r0 = 0; \ 407 + exit; \ 408 + " : 409 + : __imm(bpf_sk_fullsock), 410 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 411 + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 412 + : __clobber_all); 413 + } 414 + 415 + SEC("cgroup/skb") 416 + __description("sk_fullsock(skb->sk): sk->protocol [narrow load]") 417 + __success __success_unpriv __retval(0) 418 + __naked void sk_sk_protocol_narrow_load(void) 419 + { 420 + asm volatile (" \ 421 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 422 + if r1 != 0 goto l0_%=; \ 423 + r0 = 0; \ 424 + exit; \ 425 + l0_%=: call %[bpf_sk_fullsock]; \ 426 + if r0 != 0 goto l1_%=; \ 427 + r0 = 0; \ 428 + exit; \ 429 + l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \ 430 + r0 = 0; \ 431 + exit; \ 432 + " : 433 + : __imm(bpf_sk_fullsock), 434 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 435 + __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol)) 436 + : __clobber_all); 437 + } 438 + 439 + SEC("cgroup/skb") 440 + __description("sk_fullsock(skb->sk): beyond last field") 441 + __failure __msg("invalid sock access") 442 + __failure_unpriv 443 + __naked void skb_sk_beyond_last_field_1(void) 444 + { 445 + asm volatile (" \ 446 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 447 + if r1 != 0 goto l0_%=; \ 448 + r0 = 0; \ 449 + exit; \ 450 + l0_%=: call %[bpf_sk_fullsock]; \ 451 + if r0 != 0 goto l1_%=; \ 452 + r0 = 0; \ 453 + exit; \ 454 + l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\ 455 + r0 = 0; \ 456 + exit; \ 457 + " : 458 + : __imm(bpf_sk_fullsock), 459 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 460 + __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping)) 461 + : __clobber_all); 462 + } 463 + 464 + SEC("cgroup/skb") 
465 + __description("bpf_tcp_sock(skb->sk): no !skb->sk check") 466 + __failure __msg("type=sock_common_or_null expected=sock_common") 467 + __failure_unpriv 468 + __naked void sk_no_skb_sk_check_2(void) 469 + { 470 + asm volatile (" \ 471 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 472 + call %[bpf_tcp_sock]; \ 473 + r0 = 0; \ 474 + exit; \ 475 + " : 476 + : __imm(bpf_tcp_sock), 477 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 478 + : __clobber_all); 479 + } 480 + 481 + SEC("cgroup/skb") 482 + __description("bpf_tcp_sock(skb->sk): no NULL check on ret") 483 + __failure __msg("invalid mem access 'tcp_sock_or_null'") 484 + __failure_unpriv 485 + __naked void no_null_check_on_ret_2(void) 486 + { 487 + asm volatile (" \ 488 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 489 + if r1 != 0 goto l0_%=; \ 490 + r0 = 0; \ 491 + exit; \ 492 + l0_%=: call %[bpf_tcp_sock]; \ 493 + r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ 494 + r0 = 0; \ 495 + exit; \ 496 + " : 497 + : __imm(bpf_tcp_sock), 498 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 499 + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) 500 + : __clobber_all); 501 + } 502 + 503 + SEC("cgroup/skb") 504 + __description("bpf_tcp_sock(skb->sk): tp->snd_cwnd") 505 + __success __success_unpriv __retval(0) 506 + __naked void skb_sk_tp_snd_cwnd_1(void) 507 + { 508 + asm volatile (" \ 509 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 510 + if r1 != 0 goto l0_%=; \ 511 + r0 = 0; \ 512 + exit; \ 513 + l0_%=: call %[bpf_tcp_sock]; \ 514 + if r0 != 0 goto l1_%=; \ 515 + exit; \ 516 + l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ 517 + r0 = 0; \ 518 + exit; \ 519 + " : 520 + : __imm(bpf_tcp_sock), 521 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 522 + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) 523 + : __clobber_all); 524 + } 525 + 526 + SEC("cgroup/skb") 527 + __description("bpf_tcp_sock(skb->sk): tp->bytes_acked") 528 + __success 
__success_unpriv __retval(0) 529 + __naked void skb_sk_tp_bytes_acked(void) 530 + { 531 + asm volatile (" \ 532 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 533 + if r1 != 0 goto l0_%=; \ 534 + r0 = 0; \ 535 + exit; \ 536 + l0_%=: call %[bpf_tcp_sock]; \ 537 + if r0 != 0 goto l1_%=; \ 538 + exit; \ 539 + l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \ 540 + r0 = 0; \ 541 + exit; \ 542 + " : 543 + : __imm(bpf_tcp_sock), 544 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 545 + __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked)) 546 + : __clobber_all); 547 + } 548 + 549 + SEC("cgroup/skb") 550 + __description("bpf_tcp_sock(skb->sk): beyond last field") 551 + __failure __msg("invalid tcp_sock access") 552 + __failure_unpriv 553 + __naked void skb_sk_beyond_last_field_2(void) 554 + { 555 + asm volatile (" \ 556 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 557 + if r1 != 0 goto l0_%=; \ 558 + r0 = 0; \ 559 + exit; \ 560 + l0_%=: call %[bpf_tcp_sock]; \ 561 + if r0 != 0 goto l1_%=; \ 562 + exit; \ 563 + l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\ 564 + r0 = 0; \ 565 + exit; \ 566 + " : 567 + : __imm(bpf_tcp_sock), 568 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 569 + __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked)) 570 + : __clobber_all); 571 + } 572 + 573 + SEC("cgroup/skb") 574 + __description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd") 575 + __success __success_unpriv __retval(0) 576 + __naked void skb_sk_tp_snd_cwnd_2(void) 577 + { 578 + asm volatile (" \ 579 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 580 + if r1 != 0 goto l0_%=; \ 581 + r0 = 0; \ 582 + exit; \ 583 + l0_%=: call %[bpf_sk_fullsock]; \ 584 + if r0 != 0 goto l1_%=; \ 585 + exit; \ 586 + l1_%=: r1 = r0; \ 587 + call %[bpf_tcp_sock]; \ 588 + if r0 != 0 goto l2_%=; \ 589 + exit; \ 590 + l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ 591 + r0 = 0; \ 592 + exit; \ 593 + " : 594 
+ : __imm(bpf_sk_fullsock), 595 + __imm(bpf_tcp_sock), 596 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 597 + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) 598 + : __clobber_all); 599 + } 600 + 601 + SEC("tc") 602 + __description("bpf_sk_release(skb->sk)") 603 + __failure __msg("R1 must be referenced when passed to release function") 604 + __naked void bpf_sk_release_skb_sk(void) 605 + { 606 + asm volatile (" \ 607 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 608 + if r1 == 0 goto l0_%=; \ 609 + call %[bpf_sk_release]; \ 610 + l0_%=: r0 = 0; \ 611 + exit; \ 612 + " : 613 + : __imm(bpf_sk_release), 614 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 615 + : __clobber_all); 616 + } 617 + 618 + SEC("tc") 619 + __description("bpf_sk_release(bpf_sk_fullsock(skb->sk))") 620 + __failure __msg("R1 must be referenced when passed to release function") 621 + __naked void bpf_sk_fullsock_skb_sk(void) 622 + { 623 + asm volatile (" \ 624 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 625 + if r1 != 0 goto l0_%=; \ 626 + r0 = 0; \ 627 + exit; \ 628 + l0_%=: call %[bpf_sk_fullsock]; \ 629 + if r0 != 0 goto l1_%=; \ 630 + exit; \ 631 + l1_%=: r1 = r0; \ 632 + call %[bpf_sk_release]; \ 633 + r0 = 1; \ 634 + exit; \ 635 + " : 636 + : __imm(bpf_sk_fullsock), 637 + __imm(bpf_sk_release), 638 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 639 + : __clobber_all); 640 + } 641 + 642 + SEC("tc") 643 + __description("bpf_sk_release(bpf_tcp_sock(skb->sk))") 644 + __failure __msg("R1 must be referenced when passed to release function") 645 + __naked void bpf_tcp_sock_skb_sk(void) 646 + { 647 + asm volatile (" \ 648 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 649 + if r1 != 0 goto l0_%=; \ 650 + r0 = 0; \ 651 + exit; \ 652 + l0_%=: call %[bpf_tcp_sock]; \ 653 + if r0 != 0 goto l1_%=; \ 654 + exit; \ 655 + l1_%=: r1 = r0; \ 656 + call %[bpf_sk_release]; \ 657 + r0 = 1; \ 658 + exit; \ 659 + " : 660 + : __imm(bpf_sk_release), 661 + 
__imm(bpf_tcp_sock), 662 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 663 + : __clobber_all); 664 + } 665 + 666 + SEC("tc") 667 + __description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL") 668 + __success __retval(0) 669 + __naked void sk_null_0_value_null(void) 670 + { 671 + asm volatile (" \ 672 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 673 + if r1 != 0 goto l0_%=; \ 674 + r0 = 0; \ 675 + exit; \ 676 + l0_%=: call %[bpf_sk_fullsock]; \ 677 + if r0 != 0 goto l1_%=; \ 678 + r0 = 0; \ 679 + exit; \ 680 + l1_%=: r4 = 0; \ 681 + r3 = 0; \ 682 + r2 = r0; \ 683 + r1 = %[sk_storage_map] ll; \ 684 + call %[bpf_sk_storage_get]; \ 685 + r0 = 0; \ 686 + exit; \ 687 + " : 688 + : __imm(bpf_sk_fullsock), 689 + __imm(bpf_sk_storage_get), 690 + __imm_addr(sk_storage_map), 691 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 692 + : __clobber_all); 693 + } 694 + 695 + SEC("tc") 696 + __description("sk_storage_get(map, skb->sk, 1, 1): value == 1") 697 + __failure __msg("R3 type=scalar expected=fp") 698 + __naked void sk_1_1_value_1(void) 699 + { 700 + asm volatile (" \ 701 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 702 + if r1 != 0 goto l0_%=; \ 703 + r0 = 0; \ 704 + exit; \ 705 + l0_%=: call %[bpf_sk_fullsock]; \ 706 + if r0 != 0 goto l1_%=; \ 707 + r0 = 0; \ 708 + exit; \ 709 + l1_%=: r4 = 1; \ 710 + r3 = 1; \ 711 + r2 = r0; \ 712 + r1 = %[sk_storage_map] ll; \ 713 + call %[bpf_sk_storage_get]; \ 714 + r0 = 0; \ 715 + exit; \ 716 + " : 717 + : __imm(bpf_sk_fullsock), 718 + __imm(bpf_sk_storage_get), 719 + __imm_addr(sk_storage_map), 720 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 721 + : __clobber_all); 722 + } 723 + 724 + SEC("tc") 725 + __description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value") 726 + __success __retval(0) 727 + __naked void stack_value_1_stack_value(void) 728 + { 729 + asm volatile (" \ 730 + r2 = 0; \ 731 + *(u64*)(r10 - 8) = r2; \ 732 + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 733 + if r1 != 0 
goto l0_%=; \ 734 + r0 = 0; \ 735 + exit; \ 736 + l0_%=: call %[bpf_sk_fullsock]; \ 737 + if r0 != 0 goto l1_%=; \ 738 + r0 = 0; \ 739 + exit; \ 740 + l1_%=: r4 = 1; \ 741 + r3 = r10; \ 742 + r3 += -8; \ 743 + r2 = r0; \ 744 + r1 = %[sk_storage_map] ll; \ 745 + call %[bpf_sk_storage_get]; \ 746 + r0 = 0; \ 747 + exit; \ 748 + " : 749 + : __imm(bpf_sk_fullsock), 750 + __imm(bpf_sk_storage_get), 751 + __imm_addr(sk_storage_map), 752 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 753 + : __clobber_all); 754 + } 755 + 756 + SEC("tc") 757 + __description("bpf_map_lookup_elem(smap, &key)") 758 + __failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem") 759 + __naked void map_lookup_elem_smap_key(void) 760 + { 761 + asm volatile (" \ 762 + r1 = 0; \ 763 + *(u32*)(r10 - 4) = r1; \ 764 + r2 = r10; \ 765 + r2 += -4; \ 766 + r1 = %[sk_storage_map] ll; \ 767 + call %[bpf_map_lookup_elem]; \ 768 + r0 = 0; \ 769 + exit; \ 770 + " : 771 + : __imm(bpf_map_lookup_elem), 772 + __imm_addr(sk_storage_map) 773 + : __clobber_all); 774 + } 775 + 776 + SEC("xdp") 777 + __description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id") 778 + __success __retval(0) 779 + __naked void xskmap_key_xs_queue_id(void) 780 + { 781 + asm volatile (" \ 782 + r1 = 0; \ 783 + *(u32*)(r10 - 8) = r1; \ 784 + r2 = r10; \ 785 + r2 += -8; \ 786 + r1 = %[map_xskmap] ll; \ 787 + call %[bpf_map_lookup_elem]; \ 788 + if r0 != 0 goto l0_%=; \ 789 + exit; \ 790 + l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \ 791 + r0 = 0; \ 792 + exit; \ 793 + " : 794 + : __imm(bpf_map_lookup_elem), 795 + __imm_addr(map_xskmap), 796 + __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id)) 797 + : __clobber_all); 798 + } 799 + 800 + SEC("sk_skb") 801 + __description("bpf_map_lookup_elem(sockmap, &key)") 802 + __failure __msg("Unreleased reference id=2 alloc_insn=6") 803 + __naked void map_lookup_elem_sockmap_key(void) 804 + { 805 + asm volatile (" \ 806 + r1 = 0; \ 807 + 
/*
 * Remainder of the inline-assembly conversion of verifier/sock.c:
 * sockmap/sockhash lookups (socket reference tracking),
 * bpf_sk_select_reuseport() argument checks, and the null-mark test
 * for the bpf_skc_to_*() helpers.
 */

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockmap_key(void)
{
	/* A successful sockmap lookup acquires a socket reference that is
	 * never released here, so the verifier must reject the program.
	 */
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockhash_key(void)
{
	/* Same reference leak as above, via a sockhash map. */
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockhash] ll;			\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockhash)
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_1(void)
{
	/* Reading the fullsock field sk->type and then releasing the
	 * acquired reference is accepted.
	 */
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = r0;					\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockmap),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_2(void)
{
	/* Same as above, via a sockhash map. */
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockhash] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = r0;					\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockhash),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
__success
__naked void ctx_reuseport_array_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_reuseport_array] ll;			\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_reuseport_array)
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
__success
__naked void reuseport_ctx_sockmap_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_sockmap] ll;				\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
__success
__naked void reuseport_ctx_sockhash_key_flags(void)
{
	/* Passes map_sockmap, mirroring the original macro test which
	 * also used fixup_map_sockmap for the "sockhash" variant.
	 */
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_sockmap] ll;				\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("tc")
__description("mark null check on return value of bpf_skc_to helpers")
__failure __msg("invalid mem access")
__naked void of_bpf_skc_to_helpers(void)
{
	/* Only r8 (bpf_skc_to_tcp_request_sock's result) is null-checked;
	 * dereferencing the unchecked r7 (bpf_skc_to_tcp_sock's result)
	 * must be rejected.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r6 = r1;					\
	call %[bpf_skc_to_tcp_sock];			\
	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_skc_to_tcp_request_sock];		\
	r8 = r0;					\
	if r8 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r7 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skc_to_tcp_request_sock),
	  __imm(bpf_skc_to_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
-706
tools/testing/selftests/bpf/verifier/sock.c
/* Legacy macro-table verifier tests for socket pointer handling,
 * removed by this commit in favour of the inline-assembly
 * progs/verifier_sock.c.  Each entry pairs a raw BPF instruction
 * sequence with the verdict (ACCEPT/REJECT) and the message the
 * verifier is expected to produce; fixup_* arrays name the
 * instruction index whose map fd is patched in at load time.
 */
{
	"skb->sk: no NULL check",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "invalid mem access 'sock_common_or_null'",
},
{
	"skb->sk: sk->family [non fullsock field]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, family)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"skb->sk: sk->type [fullsock field]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, type)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "invalid sock_common access",
},
{
	"bpf_sk_fullsock(skb->sk): no !skb->sk check",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "type=sock_common_or_null expected=sock_common",
},
{
	"sk_fullsock(skb->sk): no NULL check on ret",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "invalid mem access 'sock_or_null'",
},
{
	"sk_fullsock(skb->sk): sk->type [fullsock field]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"sk_fullsock(skb->sk): sk->family [non fullsock field]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, family)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"sk_fullsock(skb->sk): sk->state [narrow load]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, state)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"sk_fullsock(skb->sk): sk->dst_port [half load]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "invalid sock access",
},
{
	"sk_fullsock(skb->sk): sk->dst_port [byte load]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
	BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "invalid sock access",
},
{
	"sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "invalid sock access",
},
{
	"sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"sk_fullsock(skb->sk): sk->type [narrow load]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"sk_fullsock(skb->sk): sk->protocol [narrow load]",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, protocol)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"sk_fullsock(skb->sk): beyond last field",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, rx_queue_mapping)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "invalid sock access",
},
{
	"bpf_tcp_sock(skb->sk): no !skb->sk check",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "type=sock_common_or_null expected=sock_common",
},
{
	"bpf_tcp_sock(skb->sk): no NULL check on ret",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "invalid mem access 'tcp_sock_or_null'",
},
{
	"bpf_tcp_sock(skb->sk): tp->snd_cwnd",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"bpf_tcp_sock(skb->sk): tp->bytes_acked",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, bytes_acked)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"bpf_tcp_sock(skb->sk): beyond last field",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_tcp_sock, bytes_acked)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = REJECT,
	.errstr = "invalid tcp_sock access",
},
{
	"bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.result = ACCEPT,
},
{
	"bpf_sk_release(skb->sk)",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "R1 must be referenced when passed to release function",
},
{
	"bpf_sk_release(bpf_sk_fullsock(skb->sk))",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "R1 must be referenced when passed to release function",
},
{
	"bpf_sk_release(bpf_tcp_sock(skb->sk))",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "R1 must be referenced when passed to release function",
},
{
	"sk_storage_get(map, skb->sk, NULL, 0): value == NULL",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_sk_storage_map = { 11 },
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"sk_storage_get(map, skb->sk, 1, 1): value == 1",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_4, 1),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_sk_storage_map = { 11 },
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "R3 type=scalar expected=fp",
},
{
	"sk_storage_get(map, skb->sk, &stack_value, 1): stack_value",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_4, 1),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_sk_storage_map = { 14 },
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"bpf_map_lookup_elem(smap, &key)",
	.insns = {
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_sk_storage_map = { 3 },
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "cannot pass map_type 24 into func bpf_map_lookup_elem",
},
{
	"bpf_map_lookup_elem(xskmap, &key); xs->queue_id",
	.insns = {
	BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_xdp_sock, queue_id)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_xskmap = { 3 },
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"bpf_map_lookup_elem(sockmap, &key)",
	.insns = {
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_sockmap = { 3 },
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.result = REJECT,
	.errstr = "Unreleased reference id=2 alloc_insn=5",
},
{
	"bpf_map_lookup_elem(sockhash, &key)",
	.insns = {
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_sockhash = { 3 },
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.result = REJECT,
	.errstr = "Unreleased reference id=2 alloc_insn=5",
},
{
	"bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)",
	.insns = {
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.fixup_map_sockmap = { 3 },
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.result = ACCEPT,
},
{
	"bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)",
	.insns = {
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.fixup_map_sockhash = { 3 },
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.result = ACCEPT,
},
{
	"bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport),
	BPF_EXIT_INSN(),
	},
	.fixup_map_reuseport_array = { 4 },
	.prog_type = BPF_PROG_TYPE_SK_REUSEPORT,
	.result = ACCEPT,
},
{
	"bpf_sk_select_reuseport(ctx, sockmap, &key, flags)",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport),
	BPF_EXIT_INSN(),
	},
	.fixup_map_sockmap = { 4 },
	.prog_type = BPF_PROG_TYPE_SK_REUSEPORT,
	.result = ACCEPT,
},
{
	"bpf_sk_select_reuseport(ctx, sockhash, &key, flags)",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport),
	BPF_EXIT_INSN(),
	},
	.fixup_map_sockmap = { 4 },
	.prog_type = BPF_PROG_TYPE_SK_REUSEPORT,
	.result = ACCEPT,
},
{
	"mark null check on return value of bpf_skc_to helpers",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_request_sock),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "invalid mem access",
	.result_unpriv = REJECT,
	.errstr_unpriv = "unknown func",
},