// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/array_access.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

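/* 48-byte value used by every map below: a 4-byte index followed by
 * MAX_ENTRIES (11) 4-byte slots, which is the "value_size=48" quoted in the
 * expected verifier messages.
 */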
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
	__uint(map_flags, BPF_F_RDONLY_PROG);
} map_array_ro SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
	__uint(map_flags, BPF_F_WRONLY_PROG);
} map_array_wo SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 2);
	__type(key, __u32);
	__type(value, struct test_val);
} map_array_pcpu SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, __u32);
	__type(value, struct test_val);
} map_array SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

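/* Each test below is a self-contained BPF program. The __description(),
 * __success/__failure, __msg()/__msg_unpriv() and __retval() annotations come
 * from bpf_misc.h and tell the selftest loader which verifier verdict, log
 * messages and return value to expect for privileged and unprivileged loads.
 */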
SEC("socket")
__description("valid map access into an array with a constant")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void an_array_with_a_constant_1(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_48b] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = %[test_val_foo];		\
	*(u64*)(r0 + 0) = r1;		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("valid map access into an array with a register")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_register_1(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_48b] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = 4;				\
	r1 <<= 2;			\
	r0 += r1;			\
	r1 = %[test_val_foo];		\
	*(u64*)(r0 + 0) = r1;		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("valid map access into an array with a variable")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_variable_1(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_48b] ll;		\
	call %[bpf_map_lookup_elem];		\
	if r0 == 0 goto l0_%=;			\
	r1 = *(u32*)(r0 + 0);			\
	if r1 >= %[max_entries] goto l0_%=;	\
	r1 <<= 2;				\
	r0 += r1;				\
	r1 = %[test_val_foo];			\
	*(u64*)(r0 + 0) = r1;			\
l0_%=:	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

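/* The next test clamps a signed index: negative values and values at or
 * above MAX_ENTRIES are replaced with 0 before the index is scaled by 4 and
 * added to the map value pointer.
 */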
SEC("socket")
__description("valid map access into an array with a signed variable")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void array_with_a_signed_variable(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_48b] ll;		\
	call %[bpf_map_lookup_elem];		\
	if r0 == 0 goto l0_%=;			\
	r1 = *(u32*)(r0 + 0);			\
	if w1 s> 0xffffffff goto l1_%=;		\
	w1 = 0;					\
l1_%=:	w2 = %[max_entries];			\
	if r2 s> r1 goto l2_%=;			\
	w1 = 0;					\
l2_%=:	w1 <<= 2;				\
	r0 += r1;				\
	r1 = %[test_val_foo];			\
	*(u64*)(r0 + 0) = r1;			\
l0_%=:	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

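/* (MAX_ENTRIES + 1) << 2 == 48, so the write below starts exactly at the end
 * of the 48-byte value, matching the expected "off=48 size=8" message.
 */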
SEC("socket")
__description("invalid map access into an array with a constant")
__failure __msg("invalid access to map value, value_size=48 off=48 size=8")
__failure_unpriv
__naked void an_array_with_a_constant_2(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_48b] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = %[test_val_foo];		\
	*(u64*)(r0 + %[__imm_0]) = r1;	\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, (MAX_ENTRIES + 1) << 2),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with a register")
__failure __msg("R0 min value is outside of the allowed memory range")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_register_2(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_48b] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = %[__imm_0];		\
	r1 <<= 2;			\
	r0 += r1;			\
	r1 = %[test_val_foo];		\
	*(u64*)(r0 + 0) = r1;		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, MAX_ENTRIES + 1),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with a variable")
__failure
__msg("R0 unbounded memory access, make sure to bounds check any such access")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_variable_2(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_48b] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = *(u32*)(r0 + 0);		\
	r1 <<= 2;			\
	r0 += r1;			\
	r1 = %[test_val_foo];		\
	*(u64*)(r0 + 0) = r1;		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with no floor check")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void array_with_no_floor_check(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_48b] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = *(u64*)(r0 + 0);		\
	w2 = %[max_entries];		\
	if r2 s> r1 goto l1_%=;		\
	w1 = 0;				\
l1_%=:	w1 <<= 2;			\
	r0 += r1;			\
	r1 = %[test_val_foo];		\
	*(u64*)(r0 + 0) = r1;		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

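/* An upper bound of MAX_ENTRIES + 1 still lets the index reach MAX_ENTRIES,
 * i.e. byte offset 44; an 8-byte write there runs past the 48-byte value,
 * matching the expected "off=44 size=8" message.
 */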
SEC("socket")
__description("invalid map access into an array with an invalid max check")
__failure __msg("invalid access to map value, value_size=48 off=44 size=8")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void with_a_invalid_max_check_1(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_hash_48b] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = *(u32*)(r0 + 0);		\
	w2 = %[__imm_0];		\
	if r2 > r1 goto l1_%=;		\
	w1 = 0;				\
l1_%=:	w1 <<= 2;			\
	r0 += r1;			\
	r1 = %[test_val_foo];		\
	*(u64*)(r0 + 0) = r1;		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, MAX_ENTRIES + 1),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with an invalid max check")
__failure __msg("R0 pointer += pointer")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void with_a_invalid_max_check_2(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_48b] ll;		\
	call %[bpf_map_lookup_elem];		\
	if r0 == 0 goto l0_%=;			\
	r8 = r0;				\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_48b] ll;		\
	call %[bpf_map_lookup_elem];		\
	if r0 == 0 goto l0_%=;			\
	r0 += r8;				\
	r0 = *(u32*)(r0 + %[test_val_foo]);	\
l0_%=:	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

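/* BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG restrict access from the program
 * side only: map_array_ro may only be read and map_array_wo may only be
 * written by BPF code, whether directly or through helpers such as
 * bpf_csum_diff() and bpf_skb_load_bytes().
 */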
SEC("socket")
__description("valid read map access into a read-only array 1")
__success __success_unpriv __retval(28)
__naked void a_read_only_array_1_1(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_array_ro] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r0 = *(u32*)(r0 + 0);		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_ro)
	: __clobber_all);
}

SEC("tc")
__description("valid read map access into a read-only array 2")
__success __retval(65507)
__naked void a_read_only_array_2_1(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_array_ro] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = r0;			\
	r2 = 4;				\
	r3 = 0;				\
	r4 = 0;				\
	r5 = 0;				\
	call %[bpf_csum_diff];		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_ro)
	: __clobber_all);
}

SEC("socket")
__description("invalid write map access into a read-only array 1")
__failure __msg("write into map forbidden")
__failure_unpriv
__naked void a_read_only_array_1_2(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_array_ro] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = 42;			\
	*(u64*)(r0 + 0) = r1;		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_ro)
	: __clobber_all);
}

SEC("tc")
__description("invalid write map access into a read-only array 2")
__failure __msg("write into map forbidden")
__naked void a_read_only_array_2_2(void)
{
	asm volatile ("			\
	r6 = r1;			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_array_ro] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = r6;			\
	r2 = 0;				\
	r3 = r0;			\
	r4 = 8;				\
	call %[bpf_skb_load_bytes];	\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_skb_load_bytes),
	  __imm_addr(map_array_ro)
	: __clobber_all);
}

SEC("socket")
__description("valid write map access into a write-only array 1")
__success __success_unpriv __retval(1)
__naked void a_write_only_array_1_1(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_array_wo] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = 42;			\
	*(u64*)(r0 + 0) = r1;		\
l0_%=:	r0 = 1;				\
	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_wo)
	: __clobber_all);
}

SEC("tc")
__description("valid write map access into a write-only array 2")
__success __retval(0)
__naked void a_write_only_array_2_1(void)
{
	asm volatile ("			\
	r6 = r1;			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_array_wo] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = r6;			\
	r2 = 0;				\
	r3 = r0;			\
	r4 = 8;				\
	call %[bpf_skb_load_bytes];	\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_skb_load_bytes),
	  __imm_addr(map_array_wo)
	: __clobber_all);
}

SEC("socket")
__description("invalid read map access into a write-only array 1")
__failure __msg("read from map forbidden")
__failure_unpriv
__naked void a_write_only_array_1_2(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_array_wo] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r0 = *(u64*)(r0 + 0);		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_wo)
	: __clobber_all);
}

SEC("tc")
__description("invalid read map access into a write-only array 2")
__failure __msg("read from map forbidden")
__naked void a_write_only_array_2_2(void)
{
	asm volatile ("			\
	r1 = 0;				\
	*(u64*)(r10 - 8) = r1;		\
	r2 = r10;			\
	r2 += -8;			\
	r1 = %[map_array_wo] ll;	\
	call %[bpf_map_lookup_elem];	\
	if r0 == 0 goto l0_%=;		\
	r1 = r0;			\
	r2 = 4;				\
	r3 = 0;				\
	r4 = 0;				\
	r5 = 0;				\
	call %[bpf_csum_diff];		\
l0_%=:	exit;				\
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_wo)
	: __clobber_all);
}

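/* The C tests below exercise "elided lookup nullness": the key is a constant
 * spilled to the stack, and when it is provably within the array's
 * max_entries the programs are expected to load even though they skip the
 * usual NULL check on the bpf_map_lookup_elem() result.
 */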
SEC("socket")
__description("valid map access into an array using constant without nullness")
__success __retval(4) __log_level(2)
__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
unsigned int an_array_with_a_constant_no_nullness(void)
{
	/* Need 8-byte alignment for spill tracking */
	__u32 __attribute__((aligned(8))) key = 1;
	struct test_val *val;

	val = bpf_map_lookup_elem(&map_array, &key);
	val->index = offsetof(struct test_val, foo);

	return val->index;
}

SEC("socket")
__description("valid multiple map access into an array using constant without nullness")
__success __retval(8) __log_level(2)
__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -16) = {{(0|r[0-9])}}")
__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
unsigned int multiple_array_with_a_constant_no_nullness(void)
{
	__u32 __attribute__((aligned(8))) key = 1;
	__u32 __attribute__((aligned(8))) key2 = 0;
	struct test_val *val, *val2;

	val = bpf_map_lookup_elem(&map_array, &key);
	val->index = offsetof(struct test_val, foo);

	val2 = bpf_map_lookup_elem(&map_array, &key2);
	val2->index = offsetof(struct test_val, foo);

	return val->index + val2->index;
}

SEC("socket")
__description("valid map access into an array using natural aligned 32-bit constant 0 without nullness")
__success __retval(4)
unsigned int an_array_with_a_32bit_constant_0_no_nullness(void)
{
	/* Unlike the above tests, 32-bit zeroing is precisely tracked even
	 * if writes are not aligned to BPF_REG_SIZE. This tests that our
	 * STACK_ZERO handling functions.
	 */
	struct test_val *val;
	__u32 key = 0;

	val = bpf_map_lookup_elem(&map_array, &key);
	val->index = offsetof(struct test_val, foo);

	return val->index;
}

SEC("socket")
__description("valid map access into a pcpu array using constant without nullness")
__success __retval(4) __log_level(2)
__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
unsigned int a_pcpu_array_with_a_constant_no_nullness(void)
{
	__u32 __attribute__((aligned(8))) key = 1;
	struct test_val *val;

	val = bpf_map_lookup_elem(&map_array_pcpu, &key);
	val->index = offsetof(struct test_val, foo);

	return val->index;
}

SEC("socket")
__description("invalid map access into an array using constant without nullness")
__failure __msg("R0 invalid mem access 'map_value_or_null'")
unsigned int an_array_with_a_constant_no_nullness_out_of_bounds(void)
{
	/* Out of bounds */
	__u32 __attribute__((aligned(8))) key = 3;
	struct test_val *val;

	val = bpf_map_lookup_elem(&map_array, &key);
	val->index = offsetof(struct test_val, foo);

	return val->index;
}

SEC("socket")
__description("invalid map access into an array using constant smaller than key_size")
__failure __msg("R0 invalid mem access 'map_value_or_null'")
unsigned int an_array_with_a_constant_too_small(void)
{
	__u32 __attribute__((aligned(8))) key;
	struct test_val *val;

	/* Mark entire key as STACK_MISC */
	bpf_probe_read_user(&key, sizeof(key), NULL);

	/* Spilling only the bottom byte results in a tnum const of 1.
	 * We want to check that the verifier rejects it, as the spill is < 4B.
	 */
	*(__u8 *)&key = 1;
	val = bpf_map_lookup_elem(&map_array, &key);

	/* Should fail, as verifier cannot prove in-bound lookup */
	val->index = offsetof(struct test_val, foo);

	return val->index;
}

SEC("socket")
__description("invalid map access into an array using constant larger than key_size")
__failure __msg("R0 invalid mem access 'map_value_or_null'")
unsigned int an_array_with_a_constant_too_big(void)
{
	struct test_val *val;
	__u64 key = 1;

	/* Even if the constant value is < max_entries, if the spill size is
	 * larger than the key size, the set bits may not be where we expect them
	 * to be on different endian architectures.
	 */
	val = bpf_map_lookup_elem(&map_array, &key);
	val->index = offsetof(struct test_val, foo);

	return val->index;
}

SEC("socket")
__description("invalid elided lookup using const and non-const key")
__failure __msg("R0 invalid mem access 'map_value_or_null'")
unsigned int mixed_const_and_non_const_key_lookup(void)
{
	__u32 __attribute__((aligned(8))) key;
	struct test_val *val;
	__u32 rand;

	rand = bpf_get_prandom_u32();
	key = rand > 42 ? 1 : rand;
	val = bpf_map_lookup_elem(&map_array, &key);

	return val->index;
}

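/* The key pointer below is r10 + 4096, far outside the program stack, so the
 * verifier is expected to reject the helper's read of the key.
 */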
SEC("socket")
__failure __msg("invalid read from stack R2 off=4096 size=4")
__naked void key_lookup_at_invalid_fp(void)
{
	asm volatile ("			\
	r1 = %[map_array] ll;		\
	r2 = r10;			\
	r2 += 4096;			\
	call %[bpf_map_lookup_elem];	\
	r0 = *(u64*)(r0 + 0);		\
	exit;				\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array)
	: __clobber_all);
}

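/* Unlike the stack-resident keys above, a key held in a global variable such
 * as global_key is not tracked as a constant, so the lookup below is expected
 * to stay map_value_or_null and the missing NULL check must be rejected.
 */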
volatile __u32 __attribute__((aligned(8))) global_key;

SEC("socket")
__description("invalid elided lookup using non-stack key")
__failure __msg("R0 invalid mem access 'map_value_or_null'")
unsigned int non_stack_key_lookup(void)
{
	struct test_val *val;

	global_key = 1;
	val = bpf_map_lookup_elem(&map_array, (void *)&global_key);
	val->index = offsetof(struct test_val, foo);

	return val->index;
}

SEC("socket")
__description("doesn't reject UINT64_MAX as s64 for irrelevant maps")
__success __retval(42)
unsigned int doesnt_reject_irrelevant_maps(void)
{
	__u64 key = 0xFFFFFFFFFFFFFFFF;
	struct test_val *val;

	val = bpf_map_lookup_elem(&map_hash_48b, &key);
	if (val)
		return val->index;

	return 42;
}

char _license[] SEC("license") = "GPL";