Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: verifier/var_off.c converted to inline assembly

Test verifier/var_off.c automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230325025524.144043-41-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Eduard Zingerman and committed by
Alexei Starovoitov
d15f5b68 d3305286

+351 -291
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 37 37 #include "verifier_value_adj_spill.skel.h" 38 38 #include "verifier_value.skel.h" 39 39 #include "verifier_value_or_null.skel.h" 40 + #include "verifier_var_off.skel.h" 40 41 41 42 __maybe_unused 42 43 static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) ··· 97 96 void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } 98 97 void test_verifier_value(void) { RUN(verifier_value); } 99 98 void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } 99 + void test_verifier_var_off(void) { RUN(verifier_var_off); }
+349
tools/testing/selftests/bpf/progs/verifier_var_off.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/var_off.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + struct { 9 + __uint(type, BPF_MAP_TYPE_HASH); 10 + __uint(max_entries, 1); 11 + __type(key, long long); 12 + __type(value, long long); 13 + } map_hash_8b SEC(".maps"); 14 + 15 + SEC("lwt_in") 16 + __description("variable-offset ctx access") 17 + __failure __msg("variable ctx access var_off=(0x0; 0x4)") 18 + __naked void variable_offset_ctx_access(void) 19 + { 20 + asm volatile (" \ 21 + /* Get an unknown value */ \ 22 + r2 = *(u32*)(r1 + 0); \ 23 + /* Make it small and 4-byte aligned */ \ 24 + r2 &= 4; \ 25 + /* add it to skb. We now have either &skb->len or\ 26 + * &skb->pkt_type, but we don't know which \ 27 + */ \ 28 + r1 += r2; \ 29 + /* dereference it */ \ 30 + r0 = *(u32*)(r1 + 0); \ 31 + exit; \ 32 + " ::: __clobber_all); 33 + } 34 + 35 + SEC("cgroup/skb") 36 + __description("variable-offset stack read, priv vs unpriv") 37 + __success __failure_unpriv 38 + __msg_unpriv("R2 variable stack access prohibited for !root") 39 + __retval(0) 40 + __naked void stack_read_priv_vs_unpriv(void) 41 + { 42 + asm volatile (" \ 43 + /* Fill the top 8 bytes of the stack */ \ 44 + r0 = 0; \ 45 + *(u64*)(r10 - 8) = r0; \ 46 + /* Get an unknown value */ \ 47 + r2 = *(u32*)(r1 + 0); \ 48 + /* Make it small and 4-byte aligned */ \ 49 + r2 &= 4; \ 50 + r2 -= 8; \ 51 + /* add it to fp. 
We now have either fp-4 or fp-8, but\ 52 + * we don't know which \ 53 + */ \ 54 + r2 += r10; \ 55 + /* dereference it for a stack read */ \ 56 + r0 = *(u32*)(r2 + 0); \ 57 + r0 = 0; \ 58 + exit; \ 59 + " ::: __clobber_all); 60 + } 61 + 62 + SEC("lwt_in") 63 + __description("variable-offset stack read, uninitialized") 64 + __failure __msg("invalid variable-offset read from stack R2") 65 + __naked void variable_offset_stack_read_uninitialized(void) 66 + { 67 + asm volatile (" \ 68 + /* Get an unknown value */ \ 69 + r2 = *(u32*)(r1 + 0); \ 70 + /* Make it small and 4-byte aligned */ \ 71 + r2 &= 4; \ 72 + r2 -= 8; \ 73 + /* add it to fp. We now have either fp-4 or fp-8, but\ 74 + * we don't know which \ 75 + */ \ 76 + r2 += r10; \ 77 + /* dereference it for a stack read */ \ 78 + r0 = *(u32*)(r2 + 0); \ 79 + r0 = 0; \ 80 + exit; \ 81 + " ::: __clobber_all); 82 + } 83 + 84 + SEC("socket") 85 + __description("variable-offset stack write, priv vs unpriv") 86 + __success __failure_unpriv 87 + /* Variable stack access is rejected for unprivileged. 88 + */ 89 + __msg_unpriv("R2 variable stack access prohibited for !root") 90 + __retval(0) 91 + __naked void stack_write_priv_vs_unpriv(void) 92 + { 93 + asm volatile (" \ 94 + /* Get an unknown value */ \ 95 + r2 = *(u32*)(r1 + 0); \ 96 + /* Make it small and 8-byte aligned */ \ 97 + r2 &= 8; \ 98 + r2 -= 16; \ 99 + /* Add it to fp. We now have either fp-8 or fp-16, but\ 100 + * we don't know which \ 101 + */ \ 102 + r2 += r10; \ 103 + /* Dereference it for a stack write */ \ 104 + r0 = 0; \ 105 + *(u64*)(r2 + 0) = r0; \ 106 + /* Now read from the address we just wrote. 
This shows\ 107 + * that, after a variable-offset write, a privileged\ 108 + * program can read the slots that were in the range of\ 109 + * that write (even if the verifier doesn't actually know\ 110 + * if the slot being read was really written to or not).\ 111 + */ \ 112 + r3 = *(u64*)(r2 + 0); \ 113 + r0 = 0; \ 114 + exit; \ 115 + " ::: __clobber_all); 116 + } 117 + 118 + SEC("socket") 119 + __description("variable-offset stack write clobbers spilled regs") 120 + __failure 121 + /* In the privileged case, dereferencing a spilled-and-then-filled 122 + * register is rejected because the previous variable offset stack 123 + * write might have overwritten the spilled pointer (i.e. we lose track 124 + * of the spilled register when we analyze the write). 125 + */ 126 + __msg("R2 invalid mem access 'scalar'") 127 + __failure_unpriv 128 + /* The unprivileged case is not too interesting; variable 129 + * stack access is rejected. 130 + */ 131 + __msg_unpriv("R2 variable stack access prohibited for !root") 132 + __naked void stack_write_clobbers_spilled_regs(void) 133 + { 134 + asm volatile (" \ 135 + /* Dummy instruction; needed because we need to patch the next one\ 136 + * and we can't patch the first instruction. \ 137 + */ \ 138 + r6 = 0; \ 139 + /* Make R0 a map ptr */ \ 140 + r0 = %[map_hash_8b] ll; \ 141 + /* Get an unknown value */ \ 142 + r2 = *(u32*)(r1 + 0); \ 143 + /* Make it small and 8-byte aligned */ \ 144 + r2 &= 8; \ 145 + r2 -= 16; \ 146 + /* Add it to fp. We now have either fp-8 or fp-16, but\ 147 + * we don't know which. 
\ 148 + */ \ 149 + r2 += r10; \ 150 + /* Spill R0(map ptr) into stack */ \ 151 + *(u64*)(r10 - 8) = r0; \ 152 + /* Dereference the unknown value for a stack write */\ 153 + r0 = 0; \ 154 + *(u64*)(r2 + 0) = r0; \ 155 + /* Fill the register back into R2 */ \ 156 + r2 = *(u64*)(r10 - 8); \ 157 + /* Try to dereference R2 for a memory load */ \ 158 + r0 = *(u64*)(r2 + 8); \ 159 + exit; \ 160 + " : 161 + : __imm_addr(map_hash_8b) 162 + : __clobber_all); 163 + } 164 + 165 + SEC("sockops") 166 + __description("indirect variable-offset stack access, unbounded") 167 + __failure __msg("invalid unbounded variable-offset indirect access to stack R4") 168 + __naked void variable_offset_stack_access_unbounded(void) 169 + { 170 + asm volatile (" \ 171 + r2 = 6; \ 172 + r3 = 28; \ 173 + /* Fill the top 16 bytes of the stack. */ \ 174 + r4 = 0; \ 175 + *(u64*)(r10 - 16) = r4; \ 176 + r4 = 0; \ 177 + *(u64*)(r10 - 8) = r4; \ 178 + /* Get an unknown value. */ \ 179 + r4 = *(u64*)(r1 + %[bpf_sock_ops_bytes_received]);\ 180 + /* Check the lower bound but don't check the upper one. */\ 181 + if r4 s< 0 goto l0_%=; \ 182 + /* Point the lower bound to initialized stack. Offset is now in range\ 183 + * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.\ 184 + */ \ 185 + r4 -= 16; \ 186 + r4 += r10; \ 187 + r5 = 8; \ 188 + /* Dereference it indirectly. 
*/ \ 189 + call %[bpf_getsockopt]; \ 190 + l0_%=: r0 = 0; \ 191 + exit; \ 192 + " : 193 + : __imm(bpf_getsockopt), 194 + __imm_const(bpf_sock_ops_bytes_received, offsetof(struct bpf_sock_ops, bytes_received)) 195 + : __clobber_all); 196 + } 197 + 198 + SEC("lwt_in") 199 + __description("indirect variable-offset stack access, max out of bound") 200 + __failure __msg("invalid variable-offset indirect access to stack R2") 201 + __naked void access_max_out_of_bound(void) 202 + { 203 + asm volatile (" \ 204 + /* Fill the top 8 bytes of the stack */ \ 205 + r2 = 0; \ 206 + *(u64*)(r10 - 8) = r2; \ 207 + /* Get an unknown value */ \ 208 + r2 = *(u32*)(r1 + 0); \ 209 + /* Make it small and 4-byte aligned */ \ 210 + r2 &= 4; \ 211 + r2 -= 8; \ 212 + /* add it to fp. We now have either fp-4 or fp-8, but\ 213 + * we don't know which \ 214 + */ \ 215 + r2 += r10; \ 216 + /* dereference it indirectly */ \ 217 + r1 = %[map_hash_8b] ll; \ 218 + call %[bpf_map_lookup_elem]; \ 219 + r0 = 0; \ 220 + exit; \ 221 + " : 222 + : __imm(bpf_map_lookup_elem), 223 + __imm_addr(map_hash_8b) 224 + : __clobber_all); 225 + } 226 + 227 + SEC("lwt_in") 228 + __description("indirect variable-offset stack access, min out of bound") 229 + __failure __msg("invalid variable-offset indirect access to stack R2") 230 + __naked void access_min_out_of_bound(void) 231 + { 232 + asm volatile (" \ 233 + /* Fill the top 8 bytes of the stack */ \ 234 + r2 = 0; \ 235 + *(u64*)(r10 - 8) = r2; \ 236 + /* Get an unknown value */ \ 237 + r2 = *(u32*)(r1 + 0); \ 238 + /* Make it small and 4-byte aligned */ \ 239 + r2 &= 4; \ 240 + r2 -= 516; \ 241 + /* add it to fp. 
We now have either fp-516 or fp-512, but\ 242 + * we don't know which \ 243 + */ \ 244 + r2 += r10; \ 245 + /* dereference it indirectly */ \ 246 + r1 = %[map_hash_8b] ll; \ 247 + call %[bpf_map_lookup_elem]; \ 248 + r0 = 0; \ 249 + exit; \ 250 + " : 251 + : __imm(bpf_map_lookup_elem), 252 + __imm_addr(map_hash_8b) 253 + : __clobber_all); 254 + } 255 + 256 + SEC("lwt_in") 257 + __description("indirect variable-offset stack access, min_off < min_initialized") 258 + __failure __msg("invalid indirect read from stack R2 var_off") 259 + __naked void access_min_off_min_initialized(void) 260 + { 261 + asm volatile (" \ 262 + /* Fill only the top 8 bytes of the stack. */ \ 263 + r2 = 0; \ 264 + *(u64*)(r10 - 8) = r2; \ 265 + /* Get an unknown value */ \ 266 + r2 = *(u32*)(r1 + 0); \ 267 + /* Make it small and 4-byte aligned. */ \ 268 + r2 &= 4; \ 269 + r2 -= 16; \ 270 + /* Add it to fp. We now have either fp-12 or fp-16, but we don't know\ 271 + * which. fp-16 size 8 is partially uninitialized stack.\ 272 + */ \ 273 + r2 += r10; \ 274 + /* Dereference it indirectly. */ \ 275 + r1 = %[map_hash_8b] ll; \ 276 + call %[bpf_map_lookup_elem]; \ 277 + r0 = 0; \ 278 + exit; \ 279 + " : 280 + : __imm(bpf_map_lookup_elem), 281 + __imm_addr(map_hash_8b) 282 + : __clobber_all); 283 + } 284 + 285 + SEC("cgroup/skb") 286 + __description("indirect variable-offset stack access, priv vs unpriv") 287 + __success __failure_unpriv 288 + __msg_unpriv("R2 variable stack access prohibited for !root") 289 + __retval(0) 290 + __naked void stack_access_priv_vs_unpriv(void) 291 + { 292 + asm volatile (" \ 293 + /* Fill the top 16 bytes of the stack. */ \ 294 + r2 = 0; \ 295 + *(u64*)(r10 - 16) = r2; \ 296 + r2 = 0; \ 297 + *(u64*)(r10 - 8) = r2; \ 298 + /* Get an unknown value. */ \ 299 + r2 = *(u32*)(r1 + 0); \ 300 + /* Make it small and 4-byte aligned. */ \ 301 + r2 &= 4; \ 302 + r2 -= 16; \ 303 + /* Add it to fp. 
We now have either fp-12 or fp-16, we don't know\ 304 + * which, but either way it points to initialized stack.\ 305 + */ \ 306 + r2 += r10; \ 307 + /* Dereference it indirectly. */ \ 308 + r1 = %[map_hash_8b] ll; \ 309 + call %[bpf_map_lookup_elem]; \ 310 + r0 = 0; \ 311 + exit; \ 312 + " : 313 + : __imm(bpf_map_lookup_elem), 314 + __imm_addr(map_hash_8b) 315 + : __clobber_all); 316 + } 317 + 318 + SEC("lwt_in") 319 + __description("indirect variable-offset stack access, ok") 320 + __success __retval(0) 321 + __naked void variable_offset_stack_access_ok(void) 322 + { 323 + asm volatile (" \ 324 + /* Fill the top 16 bytes of the stack. */ \ 325 + r2 = 0; \ 326 + *(u64*)(r10 - 16) = r2; \ 327 + r2 = 0; \ 328 + *(u64*)(r10 - 8) = r2; \ 329 + /* Get an unknown value. */ \ 330 + r2 = *(u32*)(r1 + 0); \ 331 + /* Make it small and 4-byte aligned. */ \ 332 + r2 &= 4; \ 333 + r2 -= 16; \ 334 + /* Add it to fp. We now have either fp-12 or fp-16, we don't know\ 335 + * which, but either way it points to initialized stack.\ 336 + */ \ 337 + r2 += r10; \ 338 + /* Dereference it indirectly. */ \ 339 + r1 = %[map_hash_8b] ll; \ 340 + call %[bpf_map_lookup_elem]; \ 341 + r0 = 0; \ 342 + exit; \ 343 + " : 344 + : __imm(bpf_map_lookup_elem), 345 + __imm_addr(map_hash_8b) 346 + : __clobber_all); 347 + } 348 + 349 + char _license[] SEC("license") = "GPL";
-291
tools/testing/selftests/bpf/verifier/var_off.c
··· 1 - { 2 - "variable-offset ctx access", 3 - .insns = { 4 - /* Get an unknown value */ 5 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 6 - /* Make it small and 4-byte aligned */ 7 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 8 - /* add it to skb. We now have either &skb->len or 9 - * &skb->pkt_type, but we don't know which 10 - */ 11 - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), 12 - /* dereference it */ 13 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 14 - BPF_EXIT_INSN(), 15 - }, 16 - .errstr = "variable ctx access var_off=(0x0; 0x4)", 17 - .result = REJECT, 18 - .prog_type = BPF_PROG_TYPE_LWT_IN, 19 - }, 20 - { 21 - "variable-offset stack read, priv vs unpriv", 22 - .insns = { 23 - /* Fill the top 8 bytes of the stack */ 24 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 25 - /* Get an unknown value */ 26 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 27 - /* Make it small and 4-byte aligned */ 28 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 29 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), 30 - /* add it to fp. We now have either fp-4 or fp-8, but 31 - * we don't know which 32 - */ 33 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 34 - /* dereference it for a stack read */ 35 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), 36 - BPF_MOV64_IMM(BPF_REG_0, 0), 37 - BPF_EXIT_INSN(), 38 - }, 39 - .result = ACCEPT, 40 - .result_unpriv = REJECT, 41 - .errstr_unpriv = "R2 variable stack access prohibited for !root", 42 - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 43 - }, 44 - { 45 - "variable-offset stack read, uninitialized", 46 - .insns = { 47 - /* Get an unknown value */ 48 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 49 - /* Make it small and 4-byte aligned */ 50 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 51 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), 52 - /* add it to fp. 
We now have either fp-4 or fp-8, but 53 - * we don't know which 54 - */ 55 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 56 - /* dereference it for a stack read */ 57 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), 58 - BPF_MOV64_IMM(BPF_REG_0, 0), 59 - BPF_EXIT_INSN(), 60 - }, 61 - .result = REJECT, 62 - .errstr = "invalid variable-offset read from stack R2", 63 - .prog_type = BPF_PROG_TYPE_LWT_IN, 64 - }, 65 - { 66 - "variable-offset stack write, priv vs unpriv", 67 - .insns = { 68 - /* Get an unknown value */ 69 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 70 - /* Make it small and 8-byte aligned */ 71 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), 72 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), 73 - /* Add it to fp. We now have either fp-8 or fp-16, but 74 - * we don't know which 75 - */ 76 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 77 - /* Dereference it for a stack write */ 78 - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 79 - /* Now read from the address we just wrote. This shows 80 - * that, after a variable-offset write, a privileged 81 - * program can read the slots that were in the range of 82 - * that write (even if the verifier doesn't actually know 83 - * if the slot being read was really written to or not). 84 - */ 85 - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_2, 0), 86 - BPF_MOV64_IMM(BPF_REG_0, 0), 87 - BPF_EXIT_INSN(), 88 - }, 89 - /* Variable stack access is rejected for unprivileged. 90 - */ 91 - .errstr_unpriv = "R2 variable stack access prohibited for !root", 92 - .result_unpriv = REJECT, 93 - .result = ACCEPT, 94 - }, 95 - { 96 - "variable-offset stack write clobbers spilled regs", 97 - .insns = { 98 - /* Dummy instruction; needed because we need to patch the next one 99 - * and we can't patch the first instruction. 
100 - */ 101 - BPF_MOV64_IMM(BPF_REG_6, 0), 102 - /* Make R0 a map ptr */ 103 - BPF_LD_MAP_FD(BPF_REG_0, 0), 104 - /* Get an unknown value */ 105 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 106 - /* Make it small and 8-byte aligned */ 107 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), 108 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), 109 - /* Add it to fp. We now have either fp-8 or fp-16, but 110 - * we don't know which. 111 - */ 112 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 113 - /* Spill R0(map ptr) into stack */ 114 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 115 - /* Dereference the unknown value for a stack write */ 116 - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 117 - /* Fill the register back into R2 */ 118 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), 119 - /* Try to dereference R2 for a memory load */ 120 - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), 121 - BPF_EXIT_INSN(), 122 - }, 123 - .fixup_map_hash_8b = { 1 }, 124 - /* The unprivileged case is not too interesting; variable 125 - * stack access is rejected. 126 - */ 127 - .errstr_unpriv = "R2 variable stack access prohibited for !root", 128 - .result_unpriv = REJECT, 129 - /* In the privileged case, dereferencing a spilled-and-then-filled 130 - * register is rejected because the previous variable offset stack 131 - * write might have overwritten the spilled pointer (i.e. we lose track 132 - * of the spilled register when we analyze the write). 133 - */ 134 - .errstr = "R2 invalid mem access 'scalar'", 135 - .result = REJECT, 136 - }, 137 - { 138 - "indirect variable-offset stack access, unbounded", 139 - .insns = { 140 - BPF_MOV64_IMM(BPF_REG_2, 6), 141 - BPF_MOV64_IMM(BPF_REG_3, 28), 142 - /* Fill the top 16 bytes of the stack. */ 143 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), 144 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 145 - /* Get an unknown value. 
*/ 146 - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, offsetof(struct bpf_sock_ops, 147 - bytes_received)), 148 - /* Check the lower bound but don't check the upper one. */ 149 - BPF_JMP_IMM(BPF_JSLT, BPF_REG_4, 0, 4), 150 - /* Point the lower bound to initialized stack. Offset is now in range 151 - * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded. 152 - */ 153 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16), 154 - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10), 155 - BPF_MOV64_IMM(BPF_REG_5, 8), 156 - /* Dereference it indirectly. */ 157 - BPF_EMIT_CALL(BPF_FUNC_getsockopt), 158 - BPF_MOV64_IMM(BPF_REG_0, 0), 159 - BPF_EXIT_INSN(), 160 - }, 161 - .errstr = "invalid unbounded variable-offset indirect access to stack R4", 162 - .result = REJECT, 163 - .prog_type = BPF_PROG_TYPE_SOCK_OPS, 164 - }, 165 - { 166 - "indirect variable-offset stack access, max out of bound", 167 - .insns = { 168 - /* Fill the top 8 bytes of the stack */ 169 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 170 - /* Get an unknown value */ 171 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 172 - /* Make it small and 4-byte aligned */ 173 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 174 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), 175 - /* add it to fp. 
We now have either fp-4 or fp-8, but 176 - * we don't know which 177 - */ 178 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 179 - /* dereference it indirectly */ 180 - BPF_LD_MAP_FD(BPF_REG_1, 0), 181 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 182 - BPF_MOV64_IMM(BPF_REG_0, 0), 183 - BPF_EXIT_INSN(), 184 - }, 185 - .fixup_map_hash_8b = { 5 }, 186 - .errstr = "invalid variable-offset indirect access to stack R2", 187 - .result = REJECT, 188 - .prog_type = BPF_PROG_TYPE_LWT_IN, 189 - }, 190 - { 191 - "indirect variable-offset stack access, min out of bound", 192 - .insns = { 193 - /* Fill the top 8 bytes of the stack */ 194 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 195 - /* Get an unknown value */ 196 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 197 - /* Make it small and 4-byte aligned */ 198 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 199 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 516), 200 - /* add it to fp. We now have either fp-516 or fp-512, but 201 - * we don't know which 202 - */ 203 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 204 - /* dereference it indirectly */ 205 - BPF_LD_MAP_FD(BPF_REG_1, 0), 206 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 207 - BPF_MOV64_IMM(BPF_REG_0, 0), 208 - BPF_EXIT_INSN(), 209 - }, 210 - .fixup_map_hash_8b = { 5 }, 211 - .errstr = "invalid variable-offset indirect access to stack R2", 212 - .result = REJECT, 213 - .prog_type = BPF_PROG_TYPE_LWT_IN, 214 - }, 215 - { 216 - "indirect variable-offset stack access, min_off < min_initialized", 217 - .insns = { 218 - /* Fill only the top 8 bytes of the stack. */ 219 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 220 - /* Get an unknown value */ 221 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 222 - /* Make it small and 4-byte aligned. */ 223 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 224 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), 225 - /* Add it to fp. We now have either fp-12 or fp-16, but we don't know 226 - * which. 
fp-16 size 8 is partially uninitialized stack. 227 - */ 228 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 229 - /* Dereference it indirectly. */ 230 - BPF_LD_MAP_FD(BPF_REG_1, 0), 231 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 232 - BPF_MOV64_IMM(BPF_REG_0, 0), 233 - BPF_EXIT_INSN(), 234 - }, 235 - .fixup_map_hash_8b = { 5 }, 236 - .errstr = "invalid indirect read from stack R2 var_off", 237 - .result = REJECT, 238 - .prog_type = BPF_PROG_TYPE_LWT_IN, 239 - }, 240 - { 241 - "indirect variable-offset stack access, priv vs unpriv", 242 - .insns = { 243 - /* Fill the top 16 bytes of the stack. */ 244 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), 245 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 246 - /* Get an unknown value. */ 247 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 248 - /* Make it small and 4-byte aligned. */ 249 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 250 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), 251 - /* Add it to fp. We now have either fp-12 or fp-16, we don't know 252 - * which, but either way it points to initialized stack. 253 - */ 254 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 255 - /* Dereference it indirectly. */ 256 - BPF_LD_MAP_FD(BPF_REG_1, 0), 257 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 258 - BPF_MOV64_IMM(BPF_REG_0, 0), 259 - BPF_EXIT_INSN(), 260 - }, 261 - .fixup_map_hash_8b = { 6 }, 262 - .errstr_unpriv = "R2 variable stack access prohibited for !root", 263 - .result_unpriv = REJECT, 264 - .result = ACCEPT, 265 - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 266 - }, 267 - { 268 - "indirect variable-offset stack access, ok", 269 - .insns = { 270 - /* Fill the top 16 bytes of the stack. */ 271 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), 272 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 273 - /* Get an unknown value. */ 274 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 275 - /* Make it small and 4-byte aligned. */ 276 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 277 - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), 278 - /* Add it to fp. 
We now have either fp-12 or fp-16, we don't know 279 - * which, but either way it points to initialized stack. 280 - */ 281 - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 282 - /* Dereference it indirectly. */ 283 - BPF_LD_MAP_FD(BPF_REG_1, 0), 284 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 285 - BPF_MOV64_IMM(BPF_REG_0, 0), 286 - BPF_EXIT_INSN(), 287 - }, 288 - .fixup_map_hash_8b = { 6 }, 289 - .result = ACCEPT, 290 - .prog_type = BPF_PROG_TYPE_LWT_IN, 291 - },