Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: verifier/runtime_jit converted to inline assembly

Test verifier/runtime_jit automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230421174234.2391278-18-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Eduard Zingerman; committed by Alexei Starovoitov.
65222842 16a42573

+362 -231
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 48 48 #include "verifier_ref_tracking.skel.h" 49 49 #include "verifier_regalloc.skel.h" 50 50 #include "verifier_ringbuf.skel.h" 51 + #include "verifier_runtime_jit.skel.h" 51 52 #include "verifier_spill_fill.skel.h" 52 53 #include "verifier_stack_ptr.skel.h" 53 54 #include "verifier_uninit.skel.h" ··· 138 137 void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); } 139 138 void test_verifier_regalloc(void) { RUN(verifier_regalloc); } 140 139 void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } 140 + void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } 141 141 void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } 142 142 void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } 143 143 void test_verifier_uninit(void) { RUN(verifier_uninit); }
+360
tools/testing/selftests/bpf/progs/verifier_runtime_jit.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/runtime_jit.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + void dummy_prog_42_socket(void); 9 + void dummy_prog_24_socket(void); 10 + void dummy_prog_loop1_socket(void); 11 + void dummy_prog_loop2_socket(void); 12 + 13 + struct { 14 + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); 15 + __uint(max_entries, 4); 16 + __uint(key_size, sizeof(int)); 17 + __array(values, void (void)); 18 + } map_prog1_socket SEC(".maps") = { 19 + .values = { 20 + [0] = (void *)&dummy_prog_42_socket, 21 + [1] = (void *)&dummy_prog_loop1_socket, 22 + [2] = (void *)&dummy_prog_24_socket, 23 + }, 24 + }; 25 + 26 + struct { 27 + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); 28 + __uint(max_entries, 8); 29 + __uint(key_size, sizeof(int)); 30 + __array(values, void (void)); 31 + } map_prog2_socket SEC(".maps") = { 32 + .values = { 33 + [1] = (void *)&dummy_prog_loop2_socket, 34 + [2] = (void *)&dummy_prog_24_socket, 35 + [7] = (void *)&dummy_prog_42_socket, 36 + }, 37 + }; 38 + 39 + SEC("socket") 40 + __auxiliary __auxiliary_unpriv 41 + __naked void dummy_prog_42_socket(void) 42 + { 43 + asm volatile ("r0 = 42; exit;"); 44 + } 45 + 46 + SEC("socket") 47 + __auxiliary __auxiliary_unpriv 48 + __naked void dummy_prog_24_socket(void) 49 + { 50 + asm volatile ("r0 = 24; exit;"); 51 + } 52 + 53 + SEC("socket") 54 + __auxiliary __auxiliary_unpriv 55 + __naked void dummy_prog_loop1_socket(void) 56 + { 57 + asm volatile (" \ 58 + r3 = 1; \ 59 + r2 = %[map_prog1_socket] ll; \ 60 + call %[bpf_tail_call]; \ 61 + r0 = 41; \ 62 + exit; \ 63 + " : 64 + : __imm(bpf_tail_call), 65 + __imm_addr(map_prog1_socket) 66 + : __clobber_all); 67 + } 68 + 69 + SEC("socket") 70 + __auxiliary __auxiliary_unpriv 71 + __naked void dummy_prog_loop2_socket(void) 72 + { 73 + asm volatile (" \ 74 + r3 = 1; \ 75 + r2 = %[map_prog2_socket] ll; \ 76 + call %[bpf_tail_call]; \ 77 + r0 = 41; 
\ 78 + exit; \ 79 + " : 80 + : __imm(bpf_tail_call), 81 + __imm_addr(map_prog2_socket) 82 + : __clobber_all); 83 + } 84 + 85 + SEC("socket") 86 + __description("runtime/jit: tail_call within bounds, prog once") 87 + __success __success_unpriv __retval(42) 88 + __naked void call_within_bounds_prog_once(void) 89 + { 90 + asm volatile (" \ 91 + r3 = 0; \ 92 + r2 = %[map_prog1_socket] ll; \ 93 + call %[bpf_tail_call]; \ 94 + r0 = 1; \ 95 + exit; \ 96 + " : 97 + : __imm(bpf_tail_call), 98 + __imm_addr(map_prog1_socket) 99 + : __clobber_all); 100 + } 101 + 102 + SEC("socket") 103 + __description("runtime/jit: tail_call within bounds, prog loop") 104 + __success __success_unpriv __retval(41) 105 + __naked void call_within_bounds_prog_loop(void) 106 + { 107 + asm volatile (" \ 108 + r3 = 1; \ 109 + r2 = %[map_prog1_socket] ll; \ 110 + call %[bpf_tail_call]; \ 111 + r0 = 1; \ 112 + exit; \ 113 + " : 114 + : __imm(bpf_tail_call), 115 + __imm_addr(map_prog1_socket) 116 + : __clobber_all); 117 + } 118 + 119 + SEC("socket") 120 + __description("runtime/jit: tail_call within bounds, no prog") 121 + __success __success_unpriv __retval(1) 122 + __naked void call_within_bounds_no_prog(void) 123 + { 124 + asm volatile (" \ 125 + r3 = 3; \ 126 + r2 = %[map_prog1_socket] ll; \ 127 + call %[bpf_tail_call]; \ 128 + r0 = 1; \ 129 + exit; \ 130 + " : 131 + : __imm(bpf_tail_call), 132 + __imm_addr(map_prog1_socket) 133 + : __clobber_all); 134 + } 135 + 136 + SEC("socket") 137 + __description("runtime/jit: tail_call within bounds, key 2") 138 + __success __success_unpriv __retval(24) 139 + __naked void call_within_bounds_key_2(void) 140 + { 141 + asm volatile (" \ 142 + r3 = 2; \ 143 + r2 = %[map_prog1_socket] ll; \ 144 + call %[bpf_tail_call]; \ 145 + r0 = 1; \ 146 + exit; \ 147 + " : 148 + : __imm(bpf_tail_call), 149 + __imm_addr(map_prog1_socket) 150 + : __clobber_all); 151 + } 152 + 153 + SEC("socket") 154 + __description("runtime/jit: tail_call within bounds, key 2 / key 2, first 
branch") 155 + __success __success_unpriv __retval(24) 156 + __naked void _2_key_2_first_branch(void) 157 + { 158 + asm volatile (" \ 159 + r0 = 13; \ 160 + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ 161 + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ 162 + if r0 == 13 goto l0_%=; \ 163 + r3 = 2; \ 164 + r2 = %[map_prog1_socket] ll; \ 165 + goto l1_%=; \ 166 + l0_%=: r3 = 2; \ 167 + r2 = %[map_prog1_socket] ll; \ 168 + l1_%=: call %[bpf_tail_call]; \ 169 + r0 = 1; \ 170 + exit; \ 171 + " : 172 + : __imm(bpf_tail_call), 173 + __imm_addr(map_prog1_socket), 174 + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) 175 + : __clobber_all); 176 + } 177 + 178 + SEC("socket") 179 + __description("runtime/jit: tail_call within bounds, key 2 / key 2, second branch") 180 + __success __success_unpriv __retval(24) 181 + __naked void _2_key_2_second_branch(void) 182 + { 183 + asm volatile (" \ 184 + r0 = 14; \ 185 + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ 186 + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ 187 + if r0 == 13 goto l0_%=; \ 188 + r3 = 2; \ 189 + r2 = %[map_prog1_socket] ll; \ 190 + goto l1_%=; \ 191 + l0_%=: r3 = 2; \ 192 + r2 = %[map_prog1_socket] ll; \ 193 + l1_%=: call %[bpf_tail_call]; \ 194 + r0 = 1; \ 195 + exit; \ 196 + " : 197 + : __imm(bpf_tail_call), 198 + __imm_addr(map_prog1_socket), 199 + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) 200 + : __clobber_all); 201 + } 202 + 203 + SEC("socket") 204 + __description("runtime/jit: tail_call within bounds, key 0 / key 2, first branch") 205 + __success __success_unpriv __retval(24) 206 + __naked void _0_key_2_first_branch(void) 207 + { 208 + asm volatile (" \ 209 + r0 = 13; \ 210 + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ 211 + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ 212 + if r0 == 13 goto l0_%=; \ 213 + r3 = 0; \ 214 + r2 = %[map_prog1_socket] ll; \ 215 + goto l1_%=; \ 216 + l0_%=: r3 = 2; \ 217 + r2 = %[map_prog1_socket] ll; \ 218 + l1_%=: call %[bpf_tail_call]; \ 219 + r0 = 1; \ 220 + exit; \ 221 + " : 
222 + : __imm(bpf_tail_call), 223 + __imm_addr(map_prog1_socket), 224 + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) 225 + : __clobber_all); 226 + } 227 + 228 + SEC("socket") 229 + __description("runtime/jit: tail_call within bounds, key 0 / key 2, second branch") 230 + __success __success_unpriv __retval(42) 231 + __naked void _0_key_2_second_branch(void) 232 + { 233 + asm volatile (" \ 234 + r0 = 14; \ 235 + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ 236 + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ 237 + if r0 == 13 goto l0_%=; \ 238 + r3 = 0; \ 239 + r2 = %[map_prog1_socket] ll; \ 240 + goto l1_%=; \ 241 + l0_%=: r3 = 2; \ 242 + r2 = %[map_prog1_socket] ll; \ 243 + l1_%=: call %[bpf_tail_call]; \ 244 + r0 = 1; \ 245 + exit; \ 246 + " : 247 + : __imm(bpf_tail_call), 248 + __imm_addr(map_prog1_socket), 249 + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) 250 + : __clobber_all); 251 + } 252 + 253 + SEC("socket") 254 + __description("runtime/jit: tail_call within bounds, different maps, first branch") 255 + __success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") 256 + __retval(1) 257 + __naked void bounds_different_maps_first_branch(void) 258 + { 259 + asm volatile (" \ 260 + r0 = 13; \ 261 + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ 262 + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ 263 + if r0 == 13 goto l0_%=; \ 264 + r3 = 0; \ 265 + r2 = %[map_prog1_socket] ll; \ 266 + goto l1_%=; \ 267 + l0_%=: r3 = 0; \ 268 + r2 = %[map_prog2_socket] ll; \ 269 + l1_%=: call %[bpf_tail_call]; \ 270 + r0 = 1; \ 271 + exit; \ 272 + " : 273 + : __imm(bpf_tail_call), 274 + __imm_addr(map_prog1_socket), 275 + __imm_addr(map_prog2_socket), 276 + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) 277 + : __clobber_all); 278 + } 279 + 280 + SEC("socket") 281 + __description("runtime/jit: tail_call within bounds, different maps, second branch") 282 + __success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") 283 + __retval(42) 284 + 
__naked void bounds_different_maps_second_branch(void) 285 + { 286 + asm volatile (" \ 287 + r0 = 14; \ 288 + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ 289 + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ 290 + if r0 == 13 goto l0_%=; \ 291 + r3 = 0; \ 292 + r2 = %[map_prog1_socket] ll; \ 293 + goto l1_%=; \ 294 + l0_%=: r3 = 0; \ 295 + r2 = %[map_prog2_socket] ll; \ 296 + l1_%=: call %[bpf_tail_call]; \ 297 + r0 = 1; \ 298 + exit; \ 299 + " : 300 + : __imm(bpf_tail_call), 301 + __imm_addr(map_prog1_socket), 302 + __imm_addr(map_prog2_socket), 303 + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) 304 + : __clobber_all); 305 + } 306 + 307 + SEC("socket") 308 + __description("runtime/jit: tail_call out of bounds") 309 + __success __success_unpriv __retval(2) 310 + __naked void tail_call_out_of_bounds(void) 311 + { 312 + asm volatile (" \ 313 + r3 = 256; \ 314 + r2 = %[map_prog1_socket] ll; \ 315 + call %[bpf_tail_call]; \ 316 + r0 = 2; \ 317 + exit; \ 318 + " : 319 + : __imm(bpf_tail_call), 320 + __imm_addr(map_prog1_socket) 321 + : __clobber_all); 322 + } 323 + 324 + SEC("socket") 325 + __description("runtime/jit: pass negative index to tail_call") 326 + __success __success_unpriv __retval(2) 327 + __naked void negative_index_to_tail_call(void) 328 + { 329 + asm volatile (" \ 330 + r3 = -1; \ 331 + r2 = %[map_prog1_socket] ll; \ 332 + call %[bpf_tail_call]; \ 333 + r0 = 2; \ 334 + exit; \ 335 + " : 336 + : __imm(bpf_tail_call), 337 + __imm_addr(map_prog1_socket) 338 + : __clobber_all); 339 + } 340 + 341 + SEC("socket") 342 + __description("runtime/jit: pass > 32bit index to tail_call") 343 + __success __success_unpriv __retval(42) 344 + /* Verifier rewrite for unpriv skips tail call here. 
*/ 345 + __retval_unpriv(2) 346 + __naked void _32bit_index_to_tail_call(void) 347 + { 348 + asm volatile (" \ 349 + r3 = 0x100000000 ll; \ 350 + r2 = %[map_prog1_socket] ll; \ 351 + call %[bpf_tail_call]; \ 352 + r0 = 2; \ 353 + exit; \ 354 + " : 355 + : __imm(bpf_tail_call), 356 + __imm_addr(map_prog1_socket) 357 + : __clobber_all); 358 + } 359 + 360 + char _license[] SEC("license") = "GPL";
-231
tools/testing/selftests/bpf/verifier/runtime_jit.c
··· 1 - { 2 - "runtime/jit: tail_call within bounds, prog once", 3 - .insns = { 4 - BPF_MOV64_IMM(BPF_REG_3, 0), 5 - BPF_LD_MAP_FD(BPF_REG_2, 0), 6 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 7 - BPF_MOV64_IMM(BPF_REG_0, 1), 8 - BPF_EXIT_INSN(), 9 - }, 10 - .fixup_prog1 = { 1 }, 11 - .result = ACCEPT, 12 - .retval = 42, 13 - }, 14 - { 15 - "runtime/jit: tail_call within bounds, prog loop", 16 - .insns = { 17 - BPF_MOV64_IMM(BPF_REG_3, 1), 18 - BPF_LD_MAP_FD(BPF_REG_2, 0), 19 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 20 - BPF_MOV64_IMM(BPF_REG_0, 1), 21 - BPF_EXIT_INSN(), 22 - }, 23 - .fixup_prog1 = { 1 }, 24 - .result = ACCEPT, 25 - .retval = 41, 26 - }, 27 - { 28 - "runtime/jit: tail_call within bounds, no prog", 29 - .insns = { 30 - BPF_MOV64_IMM(BPF_REG_3, 3), 31 - BPF_LD_MAP_FD(BPF_REG_2, 0), 32 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 33 - BPF_MOV64_IMM(BPF_REG_0, 1), 34 - BPF_EXIT_INSN(), 35 - }, 36 - .fixup_prog1 = { 1 }, 37 - .result = ACCEPT, 38 - .retval = 1, 39 - }, 40 - { 41 - "runtime/jit: tail_call within bounds, key 2", 42 - .insns = { 43 - BPF_MOV64_IMM(BPF_REG_3, 2), 44 - BPF_LD_MAP_FD(BPF_REG_2, 0), 45 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 46 - BPF_MOV64_IMM(BPF_REG_0, 1), 47 - BPF_EXIT_INSN(), 48 - }, 49 - .fixup_prog1 = { 1 }, 50 - .result = ACCEPT, 51 - .retval = 24, 52 - }, 53 - { 54 - "runtime/jit: tail_call within bounds, key 2 / key 2, first branch", 55 - .insns = { 56 - BPF_MOV64_IMM(BPF_REG_0, 13), 57 - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 58 - offsetof(struct __sk_buff, cb[0])), 59 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 60 - offsetof(struct __sk_buff, cb[0])), 61 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), 62 - BPF_MOV64_IMM(BPF_REG_3, 2), 63 - BPF_LD_MAP_FD(BPF_REG_2, 0), 64 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 65 - BPF_MOV64_IMM(BPF_REG_3, 2), 66 - BPF_LD_MAP_FD(BPF_REG_2, 0), 67 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_tail_call), 68 - BPF_MOV64_IMM(BPF_REG_0, 1), 69 - BPF_EXIT_INSN(), 70 - }, 71 - .fixup_prog1 = { 5, 9 }, 72 - .result = ACCEPT, 73 - .retval = 24, 74 - }, 75 - { 76 - "runtime/jit: tail_call within bounds, key 2 / key 2, second branch", 77 - .insns = { 78 - BPF_MOV64_IMM(BPF_REG_0, 14), 79 - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 80 - offsetof(struct __sk_buff, cb[0])), 81 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 82 - offsetof(struct __sk_buff, cb[0])), 83 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), 84 - BPF_MOV64_IMM(BPF_REG_3, 2), 85 - BPF_LD_MAP_FD(BPF_REG_2, 0), 86 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 87 - BPF_MOV64_IMM(BPF_REG_3, 2), 88 - BPF_LD_MAP_FD(BPF_REG_2, 0), 89 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 90 - BPF_MOV64_IMM(BPF_REG_0, 1), 91 - BPF_EXIT_INSN(), 92 - }, 93 - .fixup_prog1 = { 5, 9 }, 94 - .result = ACCEPT, 95 - .retval = 24, 96 - }, 97 - { 98 - "runtime/jit: tail_call within bounds, key 0 / key 2, first branch", 99 - .insns = { 100 - BPF_MOV64_IMM(BPF_REG_0, 13), 101 - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 102 - offsetof(struct __sk_buff, cb[0])), 103 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 104 - offsetof(struct __sk_buff, cb[0])), 105 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), 106 - BPF_MOV64_IMM(BPF_REG_3, 0), 107 - BPF_LD_MAP_FD(BPF_REG_2, 0), 108 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 109 - BPF_MOV64_IMM(BPF_REG_3, 2), 110 - BPF_LD_MAP_FD(BPF_REG_2, 0), 111 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 112 - BPF_MOV64_IMM(BPF_REG_0, 1), 113 - BPF_EXIT_INSN(), 114 - }, 115 - .fixup_prog1 = { 5, 9 }, 116 - .result = ACCEPT, 117 - .retval = 24, 118 - }, 119 - { 120 - "runtime/jit: tail_call within bounds, key 0 / key 2, second branch", 121 - .insns = { 122 - BPF_MOV64_IMM(BPF_REG_0, 14), 123 - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 124 - offsetof(struct __sk_buff, cb[0])), 125 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 126 - offsetof(struct __sk_buff, cb[0])), 127 - BPF_JMP_IMM(BPF_JEQ, 
BPF_REG_0, 13, 4), 128 - BPF_MOV64_IMM(BPF_REG_3, 0), 129 - BPF_LD_MAP_FD(BPF_REG_2, 0), 130 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 131 - BPF_MOV64_IMM(BPF_REG_3, 2), 132 - BPF_LD_MAP_FD(BPF_REG_2, 0), 133 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 134 - BPF_MOV64_IMM(BPF_REG_0, 1), 135 - BPF_EXIT_INSN(), 136 - }, 137 - .fixup_prog1 = { 5, 9 }, 138 - .result = ACCEPT, 139 - .retval = 42, 140 - }, 141 - { 142 - "runtime/jit: tail_call within bounds, different maps, first branch", 143 - .insns = { 144 - BPF_MOV64_IMM(BPF_REG_0, 13), 145 - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 146 - offsetof(struct __sk_buff, cb[0])), 147 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 148 - offsetof(struct __sk_buff, cb[0])), 149 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), 150 - BPF_MOV64_IMM(BPF_REG_3, 0), 151 - BPF_LD_MAP_FD(BPF_REG_2, 0), 152 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 153 - BPF_MOV64_IMM(BPF_REG_3, 0), 154 - BPF_LD_MAP_FD(BPF_REG_2, 0), 155 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 156 - BPF_MOV64_IMM(BPF_REG_0, 1), 157 - BPF_EXIT_INSN(), 158 - }, 159 - .fixup_prog1 = { 5 }, 160 - .fixup_prog2 = { 9 }, 161 - .result_unpriv = REJECT, 162 - .errstr_unpriv = "tail_call abusing map_ptr", 163 - .result = ACCEPT, 164 - .retval = 1, 165 - }, 166 - { 167 - "runtime/jit: tail_call within bounds, different maps, second branch", 168 - .insns = { 169 - BPF_MOV64_IMM(BPF_REG_0, 14), 170 - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 171 - offsetof(struct __sk_buff, cb[0])), 172 - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 173 - offsetof(struct __sk_buff, cb[0])), 174 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), 175 - BPF_MOV64_IMM(BPF_REG_3, 0), 176 - BPF_LD_MAP_FD(BPF_REG_2, 0), 177 - BPF_JMP_IMM(BPF_JA, 0, 0, 3), 178 - BPF_MOV64_IMM(BPF_REG_3, 0), 179 - BPF_LD_MAP_FD(BPF_REG_2, 0), 180 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 181 - BPF_MOV64_IMM(BPF_REG_0, 1), 182 - BPF_EXIT_INSN(), 183 - }, 184 - .fixup_prog1 = { 5 }, 185 - .fixup_prog2 
= { 9 }, 186 - .result_unpriv = REJECT, 187 - .errstr_unpriv = "tail_call abusing map_ptr", 188 - .result = ACCEPT, 189 - .retval = 42, 190 - }, 191 - { 192 - "runtime/jit: tail_call out of bounds", 193 - .insns = { 194 - BPF_MOV64_IMM(BPF_REG_3, 256), 195 - BPF_LD_MAP_FD(BPF_REG_2, 0), 196 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 197 - BPF_MOV64_IMM(BPF_REG_0, 2), 198 - BPF_EXIT_INSN(), 199 - }, 200 - .fixup_prog1 = { 1 }, 201 - .result = ACCEPT, 202 - .retval = 2, 203 - }, 204 - { 205 - "runtime/jit: pass negative index to tail_call", 206 - .insns = { 207 - BPF_MOV64_IMM(BPF_REG_3, -1), 208 - BPF_LD_MAP_FD(BPF_REG_2, 0), 209 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 210 - BPF_MOV64_IMM(BPF_REG_0, 2), 211 - BPF_EXIT_INSN(), 212 - }, 213 - .fixup_prog1 = { 1 }, 214 - .result = ACCEPT, 215 - .retval = 2, 216 - }, 217 - { 218 - "runtime/jit: pass > 32bit index to tail_call", 219 - .insns = { 220 - BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL), 221 - BPF_LD_MAP_FD(BPF_REG_2, 0), 222 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 223 - BPF_MOV64_IMM(BPF_REG_0, 2), 224 - BPF_EXIT_INSN(), 225 - }, 226 - .fixup_prog1 = { 2 }, 227 - .result = ACCEPT, 228 - .retval = 42, 229 - /* Verifier rewrite for unpriv skips tail call here. */ 230 - .retval_unpriv = 2, 231 - },