Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: verifier/meta_access.c converted to inline assembly

Test verifier/meta_access.c automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230325025524.144043-31-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Eduard Zingerman and committed by Alexei Starovoitov.
65428312 ade3f08f

+286 -235
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 27 27 #include "verifier_map_ptr.skel.h" 28 28 #include "verifier_map_ret_val.skel.h" 29 29 #include "verifier_masking.skel.h" 30 + #include "verifier_meta_access.skel.h" 30 31 31 32 __maybe_unused 32 33 static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) ··· 77 76 void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } 78 77 void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } 79 78 void test_verifier_masking(void) { RUN(verifier_masking); } 79 + void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
+284
tools/testing/selftests/bpf/progs/verifier_meta_access.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/meta_access.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + SEC("xdp") 9 + __description("meta access, test1") 10 + __success __retval(0) 11 + __naked void meta_access_test1(void) 12 + { 13 + asm volatile (" \ 14 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 15 + r3 = *(u32*)(r1 + %[xdp_md_data]); \ 16 + r0 = r2; \ 17 + r0 += 8; \ 18 + if r0 > r3 goto l0_%=; \ 19 + r0 = *(u8*)(r2 + 0); \ 20 + l0_%=: r0 = 0; \ 21 + exit; \ 22 + " : 23 + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 24 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 25 + : __clobber_all); 26 + } 27 + 28 + SEC("xdp") 29 + __description("meta access, test2") 30 + __failure __msg("invalid access to packet, off=-8") 31 + __naked void meta_access_test2(void) 32 + { 33 + asm volatile (" \ 34 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 35 + r3 = *(u32*)(r1 + %[xdp_md_data]); \ 36 + r0 = r2; \ 37 + r0 -= 8; \ 38 + r4 = r2; \ 39 + r4 += 8; \ 40 + if r4 > r3 goto l0_%=; \ 41 + r0 = *(u8*)(r0 + 0); \ 42 + l0_%=: r0 = 0; \ 43 + exit; \ 44 + " : 45 + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 46 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 47 + : __clobber_all); 48 + } 49 + 50 + SEC("xdp") 51 + __description("meta access, test3") 52 + __failure __msg("invalid access to packet") 53 + __naked void meta_access_test3(void) 54 + { 55 + asm volatile (" \ 56 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 57 + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 58 + r0 = r2; \ 59 + r0 += 8; \ 60 + if r0 > r3 goto l0_%=; \ 61 + r0 = *(u8*)(r2 + 0); \ 62 + l0_%=: r0 = 0; \ 63 + exit; \ 64 + " : 65 + : __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), 66 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 67 + : __clobber_all); 68 + } 69 + 70 + SEC("xdp") 71 + __description("meta 
access, test4") 72 + __failure __msg("invalid access to packet") 73 + __naked void meta_access_test4(void) 74 + { 75 + asm volatile (" \ 76 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 77 + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ 78 + r4 = *(u32*)(r1 + %[xdp_md_data]); \ 79 + r0 = r4; \ 80 + r0 += 8; \ 81 + if r0 > r3 goto l0_%=; \ 82 + r0 = *(u8*)(r2 + 0); \ 83 + l0_%=: r0 = 0; \ 84 + exit; \ 85 + " : 86 + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 87 + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), 88 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 89 + : __clobber_all); 90 + } 91 + 92 + SEC("xdp") 93 + __description("meta access, test5") 94 + __failure __msg("R3 !read_ok") 95 + __naked void meta_access_test5(void) 96 + { 97 + asm volatile (" \ 98 + r3 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 99 + r4 = *(u32*)(r1 + %[xdp_md_data]); \ 100 + r0 = r3; \ 101 + r0 += 8; \ 102 + if r0 > r4 goto l0_%=; \ 103 + r2 = -8; \ 104 + call %[bpf_xdp_adjust_meta]; \ 105 + r0 = *(u8*)(r3 + 0); \ 106 + l0_%=: r0 = 0; \ 107 + exit; \ 108 + " : 109 + : __imm(bpf_xdp_adjust_meta), 110 + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 111 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 112 + : __clobber_all); 113 + } 114 + 115 + SEC("xdp") 116 + __description("meta access, test6") 117 + __failure __msg("invalid access to packet") 118 + __naked void meta_access_test6(void) 119 + { 120 + asm volatile (" \ 121 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 122 + r3 = *(u32*)(r1 + %[xdp_md_data]); \ 123 + r0 = r3; \ 124 + r0 += 8; \ 125 + r4 = r2; \ 126 + r4 += 8; \ 127 + if r4 > r0 goto l0_%=; \ 128 + r0 = *(u8*)(r2 + 0); \ 129 + l0_%=: r0 = 0; \ 130 + exit; \ 131 + " : 132 + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 133 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 134 + : __clobber_all); 135 + } 136 + 137 + SEC("xdp") 138 + __description("meta access, test7") 139 + 
__success __retval(0) 140 + __naked void meta_access_test7(void) 141 + { 142 + asm volatile (" \ 143 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 144 + r3 = *(u32*)(r1 + %[xdp_md_data]); \ 145 + r0 = r3; \ 146 + r0 += 8; \ 147 + r4 = r2; \ 148 + r4 += 8; \ 149 + if r4 > r3 goto l0_%=; \ 150 + r0 = *(u8*)(r2 + 0); \ 151 + l0_%=: r0 = 0; \ 152 + exit; \ 153 + " : 154 + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 155 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 156 + : __clobber_all); 157 + } 158 + 159 + SEC("xdp") 160 + __description("meta access, test8") 161 + __success __retval(0) 162 + __naked void meta_access_test8(void) 163 + { 164 + asm volatile (" \ 165 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 166 + r3 = *(u32*)(r1 + %[xdp_md_data]); \ 167 + r4 = r2; \ 168 + r4 += 0xFFFF; \ 169 + if r4 > r3 goto l0_%=; \ 170 + r0 = *(u8*)(r2 + 0); \ 171 + l0_%=: r0 = 0; \ 172 + exit; \ 173 + " : 174 + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 175 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 176 + : __clobber_all); 177 + } 178 + 179 + SEC("xdp") 180 + __description("meta access, test9") 181 + __failure __msg("invalid access to packet") 182 + __naked void meta_access_test9(void) 183 + { 184 + asm volatile (" \ 185 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 186 + r3 = *(u32*)(r1 + %[xdp_md_data]); \ 187 + r4 = r2; \ 188 + r4 += 0xFFFF; \ 189 + r4 += 1; \ 190 + if r4 > r3 goto l0_%=; \ 191 + r0 = *(u8*)(r2 + 0); \ 192 + l0_%=: r0 = 0; \ 193 + exit; \ 194 + " : 195 + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 196 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 197 + : __clobber_all); 198 + } 199 + 200 + SEC("xdp") 201 + __description("meta access, test10") 202 + __failure __msg("invalid access to packet") 203 + __naked void meta_access_test10(void) 204 + { 205 + asm volatile (" \ 206 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 207 + r3 = *(u32*)(r1 + %[xdp_md_data]); 
\ 208 + r4 = *(u32*)(r1 + %[xdp_md_data_end]); \ 209 + r5 = 42; \ 210 + r6 = 24; \ 211 + *(u64*)(r10 - 8) = r5; \ 212 + lock *(u64 *)(r10 - 8) += r6; \ 213 + r5 = *(u64*)(r10 - 8); \ 214 + if r5 > 100 goto l0_%=; \ 215 + r3 += r5; \ 216 + r5 = r3; \ 217 + r6 = r2; \ 218 + r6 += 8; \ 219 + if r6 > r5 goto l0_%=; \ 220 + r2 = *(u8*)(r2 + 0); \ 221 + l0_%=: r0 = 0; \ 222 + exit; \ 223 + " : 224 + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 225 + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), 226 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 227 + : __clobber_all); 228 + } 229 + 230 + SEC("xdp") 231 + __description("meta access, test11") 232 + __success __retval(0) 233 + __naked void meta_access_test11(void) 234 + { 235 + asm volatile (" \ 236 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 237 + r3 = *(u32*)(r1 + %[xdp_md_data]); \ 238 + r5 = 42; \ 239 + r6 = 24; \ 240 + *(u64*)(r10 - 8) = r5; \ 241 + lock *(u64 *)(r10 - 8) += r6; \ 242 + r5 = *(u64*)(r10 - 8); \ 243 + if r5 > 100 goto l0_%=; \ 244 + r2 += r5; \ 245 + r5 = r2; \ 246 + r6 = r2; \ 247 + r6 += 8; \ 248 + if r6 > r3 goto l0_%=; \ 249 + r5 = *(u8*)(r5 + 0); \ 250 + l0_%=: r0 = 0; \ 251 + exit; \ 252 + " : 253 + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), 254 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 255 + : __clobber_all); 256 + } 257 + 258 + SEC("xdp") 259 + __description("meta access, test12") 260 + __success __retval(0) 261 + __naked void meta_access_test12(void) 262 + { 263 + asm volatile (" \ 264 + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ 265 + r3 = *(u32*)(r1 + %[xdp_md_data]); \ 266 + r4 = *(u32*)(r1 + %[xdp_md_data_end]); \ 267 + r5 = r3; \ 268 + r5 += 16; \ 269 + if r5 > r4 goto l0_%=; \ 270 + r0 = *(u8*)(r3 + 0); \ 271 + r5 = r2; \ 272 + r5 += 16; \ 273 + if r5 > r3 goto l0_%=; \ 274 + r0 = *(u8*)(r2 + 0); \ 275 + l0_%=: r0 = 0; \ 276 + exit; \ 277 + " : 278 + : __imm_const(xdp_md_data, offsetof(struct 
xdp_md, data)), 279 + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), 280 + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) 281 + : __clobber_all); 282 + } 283 + 284 + char _license[] SEC("license") = "GPL";
-235
tools/testing/selftests/bpf/verifier/meta_access.c
··· 1 - { 2 - "meta access, test1", 3 - .insns = { 4 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 5 - offsetof(struct xdp_md, data_meta)), 6 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), 7 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 8 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 9 - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 10 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 11 - BPF_MOV64_IMM(BPF_REG_0, 0), 12 - BPF_EXIT_INSN(), 13 - }, 14 - .result = ACCEPT, 15 - .prog_type = BPF_PROG_TYPE_XDP, 16 - }, 17 - { 18 - "meta access, test2", 19 - .insns = { 20 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 21 - offsetof(struct xdp_md, data_meta)), 22 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), 23 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 24 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8), 25 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 26 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 27 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 28 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 29 - BPF_MOV64_IMM(BPF_REG_0, 0), 30 - BPF_EXIT_INSN(), 31 - }, 32 - .result = REJECT, 33 - .errstr = "invalid access to packet, off=-8", 34 - .prog_type = BPF_PROG_TYPE_XDP, 35 - }, 36 - { 37 - "meta access, test3", 38 - .insns = { 39 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 40 - offsetof(struct xdp_md, data_meta)), 41 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 42 - offsetof(struct xdp_md, data_end)), 43 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 44 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 45 - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 46 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 47 - BPF_MOV64_IMM(BPF_REG_0, 0), 48 - BPF_EXIT_INSN(), 49 - }, 50 - .result = REJECT, 51 - .errstr = "invalid access to packet", 52 - .prog_type = BPF_PROG_TYPE_XDP, 53 - }, 54 - { 55 - "meta access, test4", 56 - .insns = { 57 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 58 - offsetof(struct xdp_md, data_meta)), 59 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 60 - offsetof(struct 
xdp_md, data_end)), 61 - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)), 62 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), 63 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 64 - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 65 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 66 - BPF_MOV64_IMM(BPF_REG_0, 0), 67 - BPF_EXIT_INSN(), 68 - }, 69 - .result = REJECT, 70 - .errstr = "invalid access to packet", 71 - .prog_type = BPF_PROG_TYPE_XDP, 72 - }, 73 - { 74 - "meta access, test5", 75 - .insns = { 76 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 77 - offsetof(struct xdp_md, data_meta)), 78 - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)), 79 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 80 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 81 - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3), 82 - BPF_MOV64_IMM(BPF_REG_2, -8), 83 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_meta), 84 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), 85 - BPF_MOV64_IMM(BPF_REG_0, 0), 86 - BPF_EXIT_INSN(), 87 - }, 88 - .result = REJECT, 89 - .errstr = "R3 !read_ok", 90 - .prog_type = BPF_PROG_TYPE_XDP, 91 - }, 92 - { 93 - "meta access, test6", 94 - .insns = { 95 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 96 - offsetof(struct xdp_md, data_meta)), 97 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), 98 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 99 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 100 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 101 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 102 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1), 103 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 104 - BPF_MOV64_IMM(BPF_REG_0, 0), 105 - BPF_EXIT_INSN(), 106 - }, 107 - .result = REJECT, 108 - .errstr = "invalid access to packet", 109 - .prog_type = BPF_PROG_TYPE_XDP, 110 - }, 111 - { 112 - "meta access, test7", 113 - .insns = { 114 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 115 - offsetof(struct xdp_md, data_meta)), 116 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 
offsetof(struct xdp_md, data)), 117 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 118 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 119 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 120 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 121 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 122 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 123 - BPF_MOV64_IMM(BPF_REG_0, 0), 124 - BPF_EXIT_INSN(), 125 - }, 126 - .result = ACCEPT, 127 - .prog_type = BPF_PROG_TYPE_XDP, 128 - }, 129 - { 130 - "meta access, test8", 131 - .insns = { 132 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 133 - offsetof(struct xdp_md, data_meta)), 134 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), 135 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 136 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), 137 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 138 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 139 - BPF_MOV64_IMM(BPF_REG_0, 0), 140 - BPF_EXIT_INSN(), 141 - }, 142 - .result = ACCEPT, 143 - .prog_type = BPF_PROG_TYPE_XDP, 144 - }, 145 - { 146 - "meta access, test9", 147 - .insns = { 148 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 149 - offsetof(struct xdp_md, data_meta)), 150 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), 151 - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 152 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), 153 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), 154 - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 155 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 156 - BPF_MOV64_IMM(BPF_REG_0, 0), 157 - BPF_EXIT_INSN(), 158 - }, 159 - .result = REJECT, 160 - .errstr = "invalid access to packet", 161 - .prog_type = BPF_PROG_TYPE_XDP, 162 - }, 163 - { 164 - "meta access, test10", 165 - .insns = { 166 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 167 - offsetof(struct xdp_md, data_meta)), 168 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), 169 - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 170 - offsetof(struct xdp_md, data_end)), 171 - BPF_MOV64_IMM(BPF_REG_5, 42), 172 
- BPF_MOV64_IMM(BPF_REG_6, 24), 173 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), 174 - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8), 175 - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), 176 - BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), 177 - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5), 178 - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 179 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 180 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), 181 - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1), 182 - BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 183 - BPF_MOV64_IMM(BPF_REG_0, 0), 184 - BPF_EXIT_INSN(), 185 - }, 186 - .result = REJECT, 187 - .errstr = "invalid access to packet", 188 - .prog_type = BPF_PROG_TYPE_XDP, 189 - }, 190 - { 191 - "meta access, test11", 192 - .insns = { 193 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 194 - offsetof(struct xdp_md, data_meta)), 195 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), 196 - BPF_MOV64_IMM(BPF_REG_5, 42), 197 - BPF_MOV64_IMM(BPF_REG_6, 24), 198 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), 199 - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8), 200 - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), 201 - BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), 202 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5), 203 - BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 204 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 205 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), 206 - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1), 207 - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0), 208 - BPF_MOV64_IMM(BPF_REG_0, 0), 209 - BPF_EXIT_INSN(), 210 - }, 211 - .result = ACCEPT, 212 - .prog_type = BPF_PROG_TYPE_XDP, 213 - }, 214 - { 215 - "meta access, test12", 216 - .insns = { 217 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 218 - offsetof(struct xdp_md, data_meta)), 219 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), 220 - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 221 - offsetof(struct xdp_md, data_end)), 222 - 
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 223 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), 224 - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5), 225 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), 226 - BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 227 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), 228 - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1), 229 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 230 - BPF_MOV64_IMM(BPF_REG_0, 0), 231 - BPF_EXIT_INSN(), 232 - }, 233 - .result = ACCEPT, 234 - .prog_type = BPF_PROG_TYPE_XDP, 235 - },