Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
/* Reject: a BPF_PSEUDO_KFUNC_CALL whose BTF id is never resolved (imm == 0,
 * no .fixup_kfunc_btf_id entry) and which is reachable must be flagged by
 * the verifier instead of reaching the JIT.
 */
1{
2 "calls: invalid kfunc call not eliminated",
3 .insns = {
4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
5 BPF_MOV64_IMM(BPF_REG_0, 1),
6 BPF_EXIT_INSN(),
7 },
8 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9 .result = REJECT,
10 .errstr = "invalid kernel function call not eliminated in verifier pass",
11},
/* Accept: the same unresolved kfunc call, but dead code — r0 is the
 * constant 1 so the JGT always jumps past it, and the verifier prunes
 * the unreachable call.
 */
12{
13 "calls: invalid kfunc call unreachable",
14 .insns = {
15 BPF_MOV64_IMM(BPF_REG_0, 1),
16 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
17 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
18 BPF_MOV64_IMM(BPF_REG_0, 1),
19 BPF_EXIT_INSN(),
20 },
21 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
22 .result = ACCEPT,
23},
/* Reject: stack pointer passed as arg#0 to bpf_kfunc_call_test_fail1
 * (fixup patches the call at insn index 2); the pointee struct contains
 * a non-scalar member, which ptr_to_mem kfunc args do not allow.
 */
24{
25 "calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
26 .insns = {
27 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
28 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
29 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
30 BPF_EXIT_INSN(),
31 },
32 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
33 .result = REJECT,
34 .errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
35 .fixup_kfunc_btf_id = {
36 { "bpf_kfunc_call_test_fail1", 2 },
37 },
38},
/* Reject: same shape, but the arg struct (prog_test_fail2) nests structs
 * deeper than the verifier's limit of 4; errstr matches two consecutive
 * verifier log lines ('\n' separated).
 */
39{
40 "calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
41 .insns = {
42 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
43 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
44 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
45 BPF_EXIT_INSN(),
46 },
47 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
48 .result = REJECT,
49 .errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
50 .fixup_kfunc_btf_id = {
51 { "bpf_kfunc_call_test_fail2", 2 },
52 },
53},
/* Reject: arg struct (prog_test_fail3) ends in a flexible array member,
 * so it cannot be treated as pointing to plain scalars.
 */
54{
55 "calls: invalid kfunc call: ptr_to_mem to struct with FAM",
56 .insns = {
57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
58 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
59 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
60 BPF_EXIT_INSN(),
61 },
62 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
63 .result = REJECT,
64 .errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
65 .fixup_kfunc_btf_id = {
66 { "bpf_kfunc_call_test_fail3", 2 },
67 },
68},
/* Reject: bpf_kfunc_call_test_pass_ctx expects the program context, but
 * the test passes fp-8 (a stack pointer with nonzero offset), tripping
 * the zero-offset requirement for trusted/ctx kfunc args.
 */
69{
70 "calls: invalid kfunc call: reg->type != PTR_TO_CTX",
71 .insns = {
72 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
73 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
74 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
75 BPF_EXIT_INSN(),
76 },
77 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
78 .result = REJECT,
79 .errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc",
80 .fixup_kfunc_btf_id = {
81 { "bpf_kfunc_call_test_pass_ctx", 2 },
82 },
83},
/* Reject: a 'void *' kfunc parameter without a companion size argument
 * has no usable type info ("UNKNOWN"), so the verifier refuses it.
 */
84{
85 "calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
86 .insns = {
87 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
89 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
90 BPF_EXIT_INSN(),
91 },
92 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
93 .result = REJECT,
94 .errstr = "arg#0 pointer type UNKNOWN must point to scalar",
95 .fixup_kfunc_btf_id = {
96 { "bpf_kfunc_call_test_mem_len_fail1", 2 },
97 },
98},
/* Reject: acquire (insn 3) may return NULL; passing the unchecked result
 * straight to the release kfunc (insn 5) is a possibly-NULL pointer for a
 * trusted argument.
 */
99{
100 "calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
101 .insns = {
102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
104 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
108 BPF_EXIT_INSN(),
109 },
110 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
111 .result = REJECT,
112 .errstr = "Possibly NULL pointer passed to trusted arg0",
113 .fixup_kfunc_btf_id = {
114 { "bpf_kfunc_call_test_acquire", 3 },
115 { "bpf_kfunc_call_test_release", 5 },
116 },
117},
/* Reject: after the NULL check, 8 is added to the acquired pointer before
 * handing it to the release kfunc (insn 8) — release args must have
 * reg->off == 0.
 */
118{
119 "calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
120 .insns = {
121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
123 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
125 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
126 BPF_EXIT_INSN(),
127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
130 BPF_MOV64_IMM(BPF_REG_0, 0),
131 BPF_EXIT_INSN(),
132 },
133 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
134 .result = REJECT,
135 .errstr = "R1 must have zero offset when passed to release func",
136 .fixup_kfunc_btf_id = {
137 { "bpf_kfunc_call_test_acquire", 3 },
138 { "bpf_kfunc_call_memb_release", 8 },
139 },
140},
/* Reject: the pointer acquired via bpf_kfunc_call_memb_acquire does not
 * match what bpf_kfunc_call_memb1_release expects as its first (member)
 * argument type.
 */
141{
142 "calls: invalid kfunc call: don't match first member type when passed to release kfunc",
143 .insns = {
144 BPF_MOV64_IMM(BPF_REG_0, 0),
145 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
146 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
147 BPF_EXIT_INSN(),
148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
150 BPF_MOV64_IMM(BPF_REG_0, 0),
151 BPF_EXIT_INSN(),
152 },
153 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
154 .result = REJECT,
155 .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
156 .fixup_kfunc_btf_id = {
157 { "bpf_kfunc_call_memb_acquire", 1 },
158 { "bpf_kfunc_call_memb1_release", 5 },
159 },
160},
/* Reject: a PTR_TO_BTF_ID (acquired object) is offset by -4 before the
 * bpf_kfunc_call_test_offset call at insn 9; negative offsets into BTF
 * objects are disallowed. r2 keeps the original pointer so it can still
 * be released at insn 12 on the (never reached at runtime) cleanup path.
 */
161{
162 "calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
163 .insns = {
164 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
166 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
168 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
169 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
170 BPF_EXIT_INSN(),
171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
174 BPF_MOV64_IMM(BPF_REG_0, 0),
175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
176 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
177 BPF_MOV64_IMM(BPF_REG_0, 0),
178 BPF_EXIT_INSN(),
179 },
180 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
181 .fixup_kfunc_btf_id = {
182 { "bpf_kfunc_call_test_acquire", 3 },
183 { "bpf_kfunc_call_test_offset", 9 },
184 { "bpf_kfunc_call_test_release", 12 },
185 },
186 .result_unpriv = REJECT,
187 .result = REJECT,
188 .errstr = "ptr R1 off=-4 disallowed",
189},
/* Reject: r2 is loaded from the object and bounded to [0, 4], then added
 * to the acquired pointer — a variable offset on PTR_TO_BTF_ID is not
 * allowed for kfunc args. Release calls on the two early-exit branches
 * (insns 9 and 13) keep the reference accounting valid so the test fails
 * only on the variable-offset pointer passed at insn 17.
 */
190{
191 "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
192 .insns = {
193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
195 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
197 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
198 BPF_EXIT_INSN(),
199 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
200 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
201 BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
202 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
203 BPF_MOV64_IMM(BPF_REG_0, 0),
204 BPF_EXIT_INSN(),
205 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
207 BPF_MOV64_IMM(BPF_REG_0, 0),
208 BPF_EXIT_INSN(),
209 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
211 BPF_MOV64_IMM(BPF_REG_0, 0),
212 BPF_EXIT_INSN(),
213 },
214 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
215 .fixup_kfunc_btf_id = {
216 { "bpf_kfunc_call_test_acquire", 3 },
217 { "bpf_kfunc_call_test_release", 9 },
218 { "bpf_kfunc_call_test_release", 13 },
219 { "bpf_kfunc_call_test_release", 17 },
220 },
221 .result_unpriv = REJECT,
222 .result = REJECT,
223 .errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
224},
/* Reject: bpf_kfunc_call_test_ref wants a refcounted PTR_TO_BTF_ID.
 * The first ref call (insn 8) gets the acquired pointer and is fine;
 * the second (insn 10) gets a pointer loaded from a field of the object
 * (not itself refcounted), so the verifier complains about R1.
 * NOTE(review): the acquired reference is deliberately not released —
 * the program is rejected before that matters.
 */
225{
226 "calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
227 .insns = {
228 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
230 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
231 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
232 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
233 BPF_EXIT_INSN(),
234 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
237 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16),
238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
239 BPF_MOV64_IMM(BPF_REG_0, 0),
240 BPF_EXIT_INSN(),
241 },
242 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
243 .fixup_kfunc_btf_id = {
244 { "bpf_kfunc_call_test_acquire", 3 },
245 { "bpf_kfunc_call_test_ref", 8 },
246 { "bpf_kfunc_call_test_ref", 10 },
247 },
248 .result_unpriv = REJECT,
249 .result = REJECT,
250 .errstr = "R1 must be",
251},
/* Accept: valid counterpart — the acquired pointer is used for the ref
 * call and then properly handed to release, so references balance.
 */
252{
253 "calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
254 .insns = {
255 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
257 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
258 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
259 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
260 BPF_EXIT_INSN(),
261 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
262 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
266 BPF_MOV64_IMM(BPF_REG_0, 0),
267 BPF_EXIT_INSN(),
268 },
269 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
270 .fixup_kfunc_btf_id = {
271 { "bpf_kfunc_call_test_acquire", 3 },
272 { "bpf_kfunc_call_test_ref", 8 },
273 { "bpf_kfunc_call_test_release", 10 },
274 },
275 .result_unpriv = REJECT,
276 .result = ACCEPT,
277},
/* Accept: simplest bpf-to-bpf call — src_reg=1 (BPF_PSEUDO_CALL) with a
 * relative target of +2 lands on the subprog that returns 2.
 */
278{
279 "calls: basic sanity",
280 .insns = {
281 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
282 BPF_MOV64_IMM(BPF_REG_0, 1),
283 BPF_EXIT_INSN(),
284 BPF_MOV64_IMM(BPF_REG_0, 2),
285 BPF_EXIT_INSN(),
286 },
287 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
288 .result = ACCEPT,
289},
/* Same program, default (socket-filter) prog type: accepted for
 * privileged loads but rejected for unprivileged users, which may not
 * use bpf-to-bpf or kernel function calls.
 */
290{
291 "calls: not on unprivileged",
292 .insns = {
293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
294 BPF_MOV64_IMM(BPF_REG_0, 1),
295 BPF_EXIT_INSN(),
296 BPF_MOV64_IMM(BPF_REG_0, 2),
297 BPF_EXIT_INSN(),
298 },
299 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
300 .result_unpriv = REJECT,
301 .result = ACCEPT,
302 .retval = 1,
303},
/* Accept: the subprog divides by a zero register (BPF defines x/0 == 0
 * at runtime, no trap) and returns a pkt-data pointer; the caller
 * bounds-checks it against data_end before dereferencing. retval 1.
 */
304{
305 "calls: div by 0 in subprog",
306 .insns = {
307 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
308 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
309 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
310 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
311 offsetof(struct __sk_buff, data_end)),
312 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
314 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
315 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
316 BPF_MOV64_IMM(BPF_REG_0, 1),
317 BPF_EXIT_INSN(),
318 BPF_MOV32_IMM(BPF_REG_2, 0),
319 BPF_MOV32_IMM(BPF_REG_3, 1),
320 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
321 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
322 offsetof(struct __sk_buff, data)),
323 BPF_EXIT_INSN(),
324 },
325 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
326 .result = ACCEPT,
327 .retval = 1,
328},
/* Reject: the subprog returns either a pkt pointer or the constant 42
 * depending on a branch, so r0 in the caller is a scalar after state
 * merging — dereferencing it is an invalid mem access.
 */
329{
330 "calls: multiple ret types in subprog 1",
331 .insns = {
332 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
334 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
335 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
336 offsetof(struct __sk_buff, data_end)),
337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
339 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
340 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
341 BPF_MOV64_IMM(BPF_REG_0, 1),
342 BPF_EXIT_INSN(),
343 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
344 offsetof(struct __sk_buff, data)),
345 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
346 BPF_MOV32_IMM(BPF_REG_0, 42),
347 BPF_EXIT_INSN(),
348 },
349 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
350 .result = REJECT,
351 .errstr = "R0 invalid mem access 'scalar'",
352},
/* Reject: the subprog may return a pkt-data value or a map value (hash
 * map fd patched in at insn 16), then adds 64 unconditionally; for the
 * map-value branch the resulting R0 can point past the 8-byte value, so
 * the range check fails.
 */
353{
354 "calls: multiple ret types in subprog 2",
355 .insns = {
356 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
358 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
359 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
360 offsetof(struct __sk_buff, data_end)),
361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
363 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
364 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
365 BPF_MOV64_IMM(BPF_REG_0, 1),
366 BPF_EXIT_INSN(),
367 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
368 offsetof(struct __sk_buff, data)),
369 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
370 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
371 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
372 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
374 BPF_LD_MAP_FD(BPF_REG_1, 0),
375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
376 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
377 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
378 offsetof(struct __sk_buff, data)),
379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
380 BPF_EXIT_INSN(),
381 },
382 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
383 .fixup_map_hash_8b = { 16 },
384 .result = REJECT,
385 .errstr = "R0 min value is outside of the allowed memory range",
386},
/* Reject: a call with offset 0 targets the next insn, making caller and
 * "callee" overlap; the resulting first function ends on a non-exit.
 */
387{
388 "calls: overlapping caller/callee",
389 .insns = {
390 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
391 BPF_MOV64_IMM(BPF_REG_0, 1),
392 BPF_EXIT_INSN(),
393 },
394 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
395 .errstr = "last insn is not an exit or jmp",
396 .result = REJECT,
397},
/* Reject: backwards calls into the middle of what the JA insns turn into
 * separate code regions — subprog boundary detection flags the jump.
 */
398{
399 "calls: wrong recursive calls",
400 .insns = {
401 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
402 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
405 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
406 BPF_MOV64_IMM(BPF_REG_0, 1),
407 BPF_EXIT_INSN(),
408 },
409 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
410 .errstr = "jump out of range",
411 .result = REJECT,
412},
/* Reject: src_reg=3 is neither 0 (helper), 1 (pseudo call) nor 2 (kfunc)
 * — reserved field in the call encoding.
 */
413{
414 "calls: wrong src reg",
415 .insns = {
416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
417 BPF_MOV64_IMM(BPF_REG_0, 1),
418 BPF_EXIT_INSN(),
419 },
420 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
421 .errstr = "BPF_CALL uses reserved fields",
422 .result = REJECT,
423},
/* Reject: non-zero off field (-1) in a BPF_CALL insn is reserved. */
424{
425 "calls: wrong off value",
426 .insns = {
427 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
428 BPF_MOV64_IMM(BPF_REG_0, 1),
429 BPF_EXIT_INSN(),
430 BPF_MOV64_IMM(BPF_REG_0, 2),
431 BPF_EXIT_INSN(),
432 },
433 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
434 .errstr = "BPF_CALL uses reserved fields",
435 .result = REJECT,
436},
/* Reject: insn 0 calls itself (target -1 resolves back to insn 0) —
 * infinite self recursion exceeds the 8-frame call-stack limit.
 */
437{
438 "calls: jump back loop",
439 .insns = {
440 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
441 BPF_MOV64_IMM(BPF_REG_0, 1),
442 BPF_EXIT_INSN(),
443 },
444 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
445 .errstr = "the call stack of 9 frames is too deep",
446 .result = REJECT,
447},
/* Reject: the conditional JEQ at insn 1 jumps forward over the call,
 * landing inside what becomes the callee — a branch crossing a subprog
 * boundary.
 */
448{
449 "calls: conditional call",
450 .insns = {
451 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
452 offsetof(struct __sk_buff, mark)),
453 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
455 BPF_MOV64_IMM(BPF_REG_0, 1),
456 BPF_EXIT_INSN(),
457 BPF_MOV64_IMM(BPF_REG_0, 2),
458 BPF_EXIT_INSN(),
459 },
460 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
461 .errstr = "jump out of range",
462 .result = REJECT,
463},
/* Accept: same shape but the call targets +4, so the branch destination
 * stays within the calling function.
 */
464{
465 "calls: conditional call 2",
466 .insns = {
467 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
468 offsetof(struct __sk_buff, mark)),
469 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
470 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
471 BPF_MOV64_IMM(BPF_REG_0, 1),
472 BPF_EXIT_INSN(),
473 BPF_MOV64_IMM(BPF_REG_0, 2),
474 BPF_EXIT_INSN(),
475 BPF_MOV64_IMM(BPF_REG_0, 3),
476 BPF_EXIT_INSN(),
477 },
478 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
479 .result = ACCEPT,
480},
/* Plain jumps only (no calls): the JA back-edges form a loop that the
 * privileged verifier can prove terminates, but unprivileged loads
 * reject back-edges outright.
 */
481{
482 "calls: conditional call 3",
483 .insns = {
484 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
485 offsetof(struct __sk_buff, mark)),
486 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
487 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
488 BPF_MOV64_IMM(BPF_REG_0, 1),
489 BPF_EXIT_INSN(),
490 BPF_MOV64_IMM(BPF_REG_0, 1),
491 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
492 BPF_MOV64_IMM(BPF_REG_0, 3),
493 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
494 },
495 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
496 .errstr_unpriv = "back-edge from insn",
497 .result_unpriv = REJECT,
498 .result = ACCEPT,
499 .retval = 1,
500},
/* Accept: the JA at insn 6 jumps back within the caller (to insn 2's
 * successor region), not across the subprog boundary.
 */
501{
502 "calls: conditional call 4",
503 .insns = {
504 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
505 offsetof(struct __sk_buff, mark)),
506 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
508 BPF_MOV64_IMM(BPF_REG_0, 1),
509 BPF_EXIT_INSN(),
510 BPF_MOV64_IMM(BPF_REG_0, 1),
511 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
512 BPF_MOV64_IMM(BPF_REG_0, 3),
513 BPF_EXIT_INSN(),
514 },
515 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
516 .result = ACCEPT,
517},
/* Accept: variant with the back-jump target one insn earlier (-6); the
 * resulting loop is still provably bounded. retval 1.
 */
518{
519 "calls: conditional call 5",
520 .insns = {
521 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
522 offsetof(struct __sk_buff, mark)),
523 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
524 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
525 BPF_MOV64_IMM(BPF_REG_0, 1),
526 BPF_EXIT_INSN(),
527 BPF_MOV64_IMM(BPF_REG_0, 1),
528 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
529 BPF_MOV64_IMM(BPF_REG_0, 3),
530 BPF_EXIT_INSN(),
531 },
532 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
533 .result = REJECT... 
/* Accept: caller exits with whatever r0 the callee set (2). */
552{
553 "calls: using r0 returned by callee",
554 .insns = {
555 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
556 BPF_EXIT_INSN(),
557 BPF_MOV64_IMM(BPF_REG_0, 2),
558 BPF_EXIT_INSN(),
559 },
560 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
561 .result = ACCEPT,
562},
/* Reject: the callee exits without ever writing r0, so the caller's
 * subsequent use of r0 reads an uninitialized register.
 */
563{
564 "calls: using uninit r0 from callee",
565 .insns = {
566 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
567 BPF_EXIT_INSN(),
568 BPF_EXIT_INSN(),
569 },
570 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
571 .errstr = "!read_ok",
572 .result = REJECT,
573},
/* Accept: r1 (ctx) is passed through to the callee, which reads
 * skb->len from it. retval == TEST_DATA_LEN.
 */
574{
575 "calls: callee is using r1",
576 .insns = {
577 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
578 BPF_EXIT_INSN(),
579 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
580 offsetof(struct __sk_buff, len)),
581 BPF_EXIT_INSN(),
582 },
583 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
584 .result = ACCEPT,
585 .retval = TEST_DATA_LEN,
586},
/* Callee returns the raw ctx pointer value as r0: allowed privileged
 * (retval is the pointer), rejected unprivileged (pointer leak).
 */
587{
588 "calls: callee using args1",
589 .insns = {
590 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
591 BPF_EXIT_INSN(),
592 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
593 BPF_EXIT_INSN(),
594 },
595 .errstr_unpriv = "allowed for",
596 .result_unpriv = REJECT,
597 .result = ACCEPT,
598 .retval = POINTER_VALUE,
599},
/* Reject: only r1 is initialized at the call site, so the callee's read
 * of r2 is an uninitialized-register access.
 */
600{
601 "calls: callee using wrong args2",
602 .insns = {
603 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
604 BPF_EXIT_INSN(),
605 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
606 BPF_EXIT_INSN(),
607 },
608 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
609 .errstr = "R2 !read_ok",
610 .result = REJECT,
611},
/* Accept: r1 and r2 are both loaded with skb->len before the call and
 * summed in the callee; retval documents that the runner's skb->len is
 * TEST_DATA_LEN - ETH_HLEN per argument.
 */
612{
613 "calls: callee using two args",
614 .insns = {
615 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
616 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
617 offsetof(struct __sk_buff, len)),
618 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
619 offsetof(struct __sk_buff, len)),
620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
621 BPF_EXIT_INSN(),
622 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
623 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
624 BPF_EXIT_INSN(),
625 },
626 .errstr_unpriv = "allowed for",
627 .result_unpriv = REJECT,
628 .result = ACCEPT,
629 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
630},
/* Reject: the callee calls bpf_xdp_adjust_head(), which invalidates all
 * packet pointers in every frame; the caller's post-call load through
 * its saved pkt pointer (r6) must therefore fail as a scalar access.
 */
631{
632 "calls: callee changing pkt pointers",
633 .insns = {
634 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
635 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
636 offsetof(struct xdp_md, data_end)),
637 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
639 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
640 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
641 /* clear_all_pkt_pointers() has to walk all frames
642 * to make sure that pkt pointers in the caller
643 * are cleared when callee is calling a helper that
644 * adjusts packet size
645 */
646 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
647 BPF_MOV32_IMM(BPF_REG_0, 0),
648 BPF_EXIT_INSN(),
649 BPF_MOV64_IMM(BPF_REG_2, 0),
650 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
651 BPF_EXIT_INSN(),
652 },
653 .result = REJECT,
654 .errstr = "R6 invalid mem access 'scalar'",
655 .prog_type = BPF_PROG_TYPE_XDP,
656 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
657},
/* Accept: the map-lookup result is NULL-checked inside the subprog
 * (returning 0/1), and the caller only dereferences r6 when the subprog
 * reported non-NULL. Map fd patched at insn 3.
 */
658{
659 "calls: ptr null check in subprog",
660 .insns = {
661 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
662 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
664 BPF_LD_MAP_FD(BPF_REG_1, 0),
665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
666 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
667 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
668 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
669 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
670 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
671 BPF_EXIT_INSN(),
672 BPF_MOV64_IMM(BPF_REG_0, 0),
673 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
674 BPF_MOV64_IMM(BPF_REG_0, 1),
675 BPF_EXIT_INSN(),
676 },
677 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
678 .fixup_map_hash_48b = { 3 },
679 .result_unpriv = REJECT,
680 .result = ACCEPT,
681 .retval = 0,
682},
/* Accept: two nested call levels; the innermost subprog reads skb->len
 * and the middle one sums two such calls, so retval is 2 * len.
 */
683{
684 "calls: two calls with args",
685 .insns = {
686 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
687 BPF_EXIT_INSN(),
688 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
689 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
690 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
693 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
694 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
695 BPF_EXIT_INSN(),
696 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
697 offsetof(struct __sk_buff, len)),
698 BPF_EXIT_INSN(),
699 },
700 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
701 .result = ACCEPT,
702 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
703},
/* Accept: each frame subtracts another 64 from the inherited stack
 * pointer before passing it down; the final 8-byte store is still
 * aligned and within combined stack bounds. retval 42.
 */
704{
705 "calls: calls with stack arith",
706 .insns = {
707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
709 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
710 BPF_EXIT_INSN(),
711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
713 BPF_EXIT_INSN(),
714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
715 BPF_MOV64_IMM(BPF_REG_0, 42),
716 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
717 BPF_EXIT_INSN(),
718 },
719 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
720 .result = ACCEPT,
721 .retval = 42,
722},
/* Reject under strict alignment: the odd decrements (-63, -61, -63)
 * leave the final 8-byte store misaligned.
 */
723{
724 "calls: calls with misaligned stack access",
725 .insns = {
726 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
729 BPF_EXIT_INSN(),
730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
731 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
732 BPF_EXIT_INSN(),
733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
734 BPF_MOV64_IMM(BPF_REG_0, 42),
735 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
736 BPF_EXIT_INSN(),
737 },
738 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
739 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
740 .errstr = "misaligned stack access",
741 .result = REJECT,
742},
/* Accept: pure JA control flow — the path sets r0=42, jumps to the JA at
 * insn 4, loops back once setting r0=43, then exits. retval 43.
 */
743{
744 "calls: calls control flow, jump test",
745 .insns = {
746 BPF_MOV64_IMM(BPF_REG_0, 42),
747 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
748 BPF_MOV64_IMM(BPF_REG_0, 43),
749 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
750 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
751 BPF_EXIT_INSN(),
752 },
753 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
754 .result = ACCEPT,
755 .retval = 43,
756},
/* Reject: same program with the back-JA replaced by a backwards call;
 * now insn 1's JA crosses into a different function ("from insn 1 to 4").
 */
757{
758 "calls: calls control flow, jump test 2",
759 .insns = {
760 BPF_MOV64_IMM(BPF_REG_0, 42),
761 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
762 BPF_MOV64_IMM(BPF_REG_0, 43),
763 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
765 BPF_EXIT_INSN(),
766 },
767 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
768 .errstr = "jump out of range from insn 1 to 4",
769 .result = REJECT,
770},
/* Reject: the second callee's conditional branch at insn 11 targets
 * insn 9, which belongs to the first callee — cross-function jump.
 */
771{
772 "calls: two calls with bad jump",
773 .insns = {
774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
775 BPF_EXIT_INSN(),
776 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
777 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
778 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
779 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
780 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
781 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
782 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
783 BPF_EXIT_INSN(),
784 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
785 offsetof(struct __sk_buff, len)),
786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
787 BPF_EXIT_INSN(),
788 },
789 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
790 .errstr = "jump out of range from insn 11 to 9",
791 .result = REJECT,
792},
/* Reject: subprog calls itself (target -1) — recursion blows the
 * 8-frame call-stack limit.
 */
793{
794 "calls: recursive call. test1",
795 .insns = {
796 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
797 BPF_EXIT_INSN(),
798 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
799 BPF_EXIT_INSN(),
800 },
801 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
802 .errstr = "the call stack of 9 frames is too deep",
803 .result = REJECT,
804},
/* Reject: mutual recursion — subprog calls back to insn 0 (main), which
 * calls it again; same frame-depth limit.
 */
805{
806 "calls: recursive call. test2",
807 .insns = {
808 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
809 BPF_EXIT_INSN(),
810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
811 BPF_EXIT_INSN(),
812 },
813 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
814 .errstr = "the call stack of 9 frames is too deep",
815 .result = REJECT,
816},
/* Reject: insns 6-7 are never the target of any call or jump —
 * dead code must be reported ("unreachable insn 6").
 */
817{
818 "calls: unreachable code",
819 .insns = {
820 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
821 BPF_EXIT_INSN(),
822 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
823 BPF_EXIT_INSN(),
824 BPF_MOV64_IMM(BPF_REG_0, 0),
825 BPF_EXIT_INSN(),
826 BPF_MOV64_IMM(BPF_REG_0, 0),
827 BPF_EXIT_INSN(),
828 },
829 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
830 .errstr = "unreachable insn 6",
831 .result = REJECT,
832},
/* Reject: call target -4 resolves to before insn 0 — outside the
 * program.
 */
833{
834 "calls: invalid call",
835 .insns = {
836 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
837 BPF_EXIT_INSN(),
838 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
839 BPF_EXIT_INSN(),
840 },
841 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
842 .errstr = "invalid destination",
843 .result = REJECT,
844},
/* Reject: INT_MAX target overflows far past the end of the program. */
845{
846 "calls: invalid call 2",
847 .insns = {
848 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
849 BPF_EXIT_INSN(),
850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
851 BPF_EXIT_INSN(),
852 },
853 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
854 .errstr = "invalid destination",
855 .result = REJECT,
856},
/* Reject: the callee's conditional branch targets insn 0, i.e. back into
 * the main function's body.
 */
857{
858 "calls: jumping across function bodies. test1",
859 .insns = {
860 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
861 BPF_MOV64_IMM(BPF_REG_0, 0),
862 BPF_EXIT_INSN(),
863 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
864 BPF_EXIT_INSN(),
865 },
866 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
867 .errstr = "jump out of range",
868 .result = REJECT,
869},
/* Reject: main's conditional branch at insn 0 jumps forward into the
 * callee's body.
 */
870{
871 "calls: jumping across function bodies. test2",
872 .insns = {
873 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
874 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
875 BPF_MOV64_IMM(BPF_REG_0, 0),
876 BPF_EXIT_INSN(),
877 BPF_EXIT_INSN(),
878 },
879 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
880 .errstr = "jump out of range",
881 .result = REJECT,
882},
/* Reject: the last function ends on a conditional jump instead of an
 * exit or unconditional jmp.
 */
883{
884 "calls: call without exit",
885 .insns = {
886 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
887 BPF_EXIT_INSN(),
888 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
889 BPF_EXIT_INSN(),
890 BPF_MOV64_IMM(BPF_REG_0, 0),
891 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
892 },
893 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
894 .errstr = "not an exit",
895 .result = REJECT,
896},
/* Reject: the second call targets the second half of a 16-byte
 * BPF_LD_IMM64, splitting the double insn.
 */
897{
898 "calls: call into middle of ld_imm64",
899 .insns = {
900 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
902 BPF_MOV64_IMM(BPF_REG_0, 0),
903 BPF_EXIT_INSN(),
904 BPF_LD_IMM64(BPF_REG_0, 0),
905 BPF_EXIT_INSN(),
906 },
907 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
908 .errstr = "last insn",
909 .result = REJECT,
910},
/* Reject: same shape but the split target falls between two calls'
 * function bodies, so a function ends on a non-exit insn.
 */
911{
912 "calls: call into middle of other call",
913 .insns = {
914 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
915 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
916 BPF_MOV64_IMM(BPF_REG_0, 0),
917 BPF_EXIT_INSN(),
918 BPF_MOV64_IMM(BPF_REG_0, 0),
919 BPF_MOV64_IMM(BPF_REG_0, 0),
920 BPF_EXIT_INSN(),
921 },
922 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
923 .errstr = "last insn",
924 .result = REJECT,
925},
/* Accept: LD_ABS in the main prog around a subprog call whose callee
 * invokes bpf_skb_vlan_push() (which may reallocate skb data); r6 is
 * restored from r7 before the second LD_ABS group.
 */
926{
927 "calls: subprog call with ld_abs in main prog",
928 .insns = {
929 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
930 BPF_LD_ABS(BPF_B, 0),
931 BPF_LD_ABS(BPF_H, 0),
932 BPF_LD_ABS(BPF_W, 0),
933 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
934 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
935 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
936 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
937 BPF_LD_ABS(BPF_B, 0),
938 BPF_LD_ABS(BPF_H, 0),
939 BPF_LD_ABS(BPF_W, 0),
940 BPF_EXIT_INSN(),
941 BPF_MOV64_IMM(BPF_REG_2, 1),
942 BPF_MOV64_IMM(BPF_REG_3, 2),
943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
944 BPF_EXIT_INSN(),
945 },
946 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
947 .result = ACCEPT,
948},
/* Reject: the first function falls through into the second callee's body
 * instead of ending on an exit.
 */
949{
950 "calls: two calls with bad fallthrough",
951 .insns = {
952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
953 BPF_EXIT_INSN(),
954 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
956 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
958 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
959 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
960 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
961 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
962 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
963 offsetof(struct __sk_buff, len)),
964 BPF_EXIT_INSN(),
965 },
966 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
967 .errstr = "not an exit",
968 .result = REJECT,
969},
/* Accept: caller passes a pointer into its own (initialized) stack slot;
 * the innermost subprog reads through it.
 */
970{
971 "calls: two calls with stack read",
972 .insns = {
973 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
974 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
976 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
977 BPF_EXIT_INSN(),
978 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
980 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
982 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
983 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
984 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
985 BPF_EXIT_INSN(),
986 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
987 BPF_EXIT_INSN(),
988 },
989 .prog_type = BPF_PROG_TYPE_XDP,
990 .result = ACCEPT,
991},
{
	/* Subprog 2 reads the main prog's fp-8 slot and subprog 1 writes
	 * back into the main prog's fp-16 slot through pointer arguments;
	 * cross-frame stack writes via PTR_TO_STACK args are legal.
	 */
	"calls: two calls with stack write",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
	/* write into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* read from stack frame of main prog */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	/* Both frames touch fp-300, so caller + callee together exceed
	 * the combined stack limit; the access in the caller happens
	 * before the call.  Must be rejected.
	 */
	"calls: stack overflow using two frames (pre-call access)",
	.insns = {
	/* prog 1 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	/* Same as the pre-call variant, but the caller's deep stack
	 * access happens after the call returns; combined usage must
	 * still be accounted and rejected.
	 */
	"calls: stack overflow using two frames (post-call access)",
	.insns = {
	/* prog 1 */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),

	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	/* Deepest chain is main -> B -> A; worst-case combined stack
	 * stays under the limit, so the program is accepted.
	 */
	"calls: stack depth check using three frames. test1",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=256, stack_B=64
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	/* Same call graph as test1 with the frame sizes of A and B
	 * swapped; worst case is still under the limit, so accepted.
	 */
	"calls: stack depth check using three frames. test2",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=64, stack_B=256
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	/* Larger frames plus branches and a backward jump; the deepest
	 * chain main -> B -> A exceeds the combined stack limit, so the
	 * verifier must reject it.
	 */
	"calls: stack depth check using three frames. test3",
	.insns = {
	/* main */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	/* B */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=64, stack_A=224, stack_B=256
	 * and max(main+A, main+A+B) > 512
	 */
	.errstr = "combined stack",
	.result = REJECT,
},
{
	/* Stack depth must be computed per worst-case path, not per
	 * first-seen path: A's deep store is only reachable on some
	 * calls, but the verifier must still account for it and reject.
	 */
	"calls: stack depth check using three frames. test4",
	/* void main(void) {
	 *   func1(0);
	 *   func1(1);
	 *   func2(1);
	 * }
	 * void func1(int alloc_or_recurse) {
	 *   if (alloc_or_recurse) {
	 *     frame_pointer[-300] = 1;
	 *   } else {
	 *     func2(alloc_or_recurse);
	 *   }
	 * }
	 * void func2(int alloc_or_recurse) {
	 *   if (alloc_or_recurse) {
	 *     frame_pointer[-300] = 1;
	 *   }
	 * }
	 */
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "combined stack",
},
{
	/* Nine-deep call chain main -> A -> ... -> H; exceeds the
	 * verifier's maximum call-frame depth, so rejected with
	 * "call stack" (not a combined-size error).
	 */
	"calls: stack depth check using three frames. test5",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	/* The too-deep chain through B..H is behind a branch that is
	 * never taken at runtime (R1 == 0), but the depth check is a
	 * static property of the call graph and must still reject.
	 */
	"calls: stack depth check in dead code",
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	/* Subprog tries to spill the caller's stack pointer (R1, a
	 * PTR_TO_STACK into the caller's frame) through itself;
	 * spilling a pointer into the caller's frame this way is
	 * rejected with "cannot spill".
	 */
	"calls: spill into caller stack frame",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot spill",
	.result = REJECT,
},
{
	/* Subprog stores the scalar 42 through the caller's stack
	 * pointer; caller reads it back and returns it.  Scalar writes
	 * into the caller's frame are legal, so this runs to retval 42.
	 */
	"calls: write into caller stack frame",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
	.retval = 42,
},
{
	/* Subprog returns a pointer into its own (dead after return)
	 * stack frame; caller then writes through it.  Returning a
	 * stack pointer from a subprog must be rejected.
	 */
	"calls: write into callee stack frame",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot return stack pointer",
	.result = REJECT,
},
{
	/* Subprog 2 never sets R0 before exiting ("void return"); that
	 * is fine for a subprog (only the main prog's R0 matters), and
	 * its cross-frame stack write is legal.  Accepted.
	 */
	"calls: two calls with stack write and void return",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* write into stack frame of main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(), /* void return */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	/* The subprog exits without initializing R0 on one path; the
	 * caller then uses R0, so the verifier must flag R0 as not
	 * readable.  Unprivileged loading of this prog type is also
	 * disallowed ("allowed for").
	 */
	"calls: ambiguous return value",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
{
	/* Subprog 2 stores map_value_ptr_or_null into the main prog's
	 * stack slots; main prog NULL-checks each slot before writing
	 * through it, so the program is accepted.
	 */
	"calls: two calls that return map_value",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),

	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	/* Subprog 2 returns 1 only when the stored pointer is non-NULL;
	 * subprog 1 dereferences the stashed pointer only when the
	 * return value is 1.  The correlation is sound, so accepted.
	 */
	"calls: two calls that return map_value with bool condition",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	/* Like the bool-condition test, but the second check is
	 * inverted (JNE 0 instead of JNE 1): the fp-16 slot is read and
	 * dereferenced exactly when the lookup FAILED, so the verifier
	 * must reject (scalar deref for priv, uninit stack read for
	 * unpriv).
	 */
	"calls: two calls that return map_value with incorrect bool check",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* wrong boolean check: proceeds when subprog returned 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
	.result_unpriv = REJECT,
	.errstr_unpriv = "invalid read from stack R7 off=-16 size=8",
},
{
	/* Map-value pointers stashed in the caller's stack plus 0/1
	 * validity flags are passed to a third subprog; the value write
	 * at off=2 size=8 overruns the 8-byte map value, so rejected.
	 */
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value -- out of bounds at off=2 size=8 */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Same structure as test1, but both map-value writes are at
	 * offset 0 and therefore in bounds; must be accepted.
	 */
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	/* Same pattern as test1 but the subprogs are reached via jumps
	 * instead of BPF_CALL (single frame).  The off=2 size=8 write
	 * into an 8-byte map value must still be rejected.
	 */
	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0), // 26
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
	BPF_JMP_IMM(BPF_JA, 0, 0, -30),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value -- out of bounds at off=2 size=8 */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* The possibly-NULL lookup results are stored to the caller's
	 * stack BEFORE the NULL checks; the 0/1 flags passed to subprog
	 * 2 are set consistently with those checks, so the guarded
	 * dereferences are safe and the program is accepted.
	 */
	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	/* Same as test1, except subprog 2 dereferences arg3 when
	 * arg4 == 0, i.e. exactly when the second lookup returned NULL;
	 * the verifier must catch the inverted flag and reject.
	 */
	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 0 do *arg3 = 0 -- inverted flag check */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
},
{
	/* Subprog spills an unchecked pkt pointer into the caller's
	 * stack, then range-checks it and reads it back before the
	 * packet write; the reload happens only on the verified path,
	 * so this is accepted.
	 */
	"calls: pkt_ptr spill into caller stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* The caller reloads the spilled pkt pointer unconditionally
	 * after the call, i.e. also on the path where the subprog's
	 * range check failed; the packet write must be rejected.
	 */
	"calls: pkt_ptr spill into caller stack 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* Marking is still kept, but not in all cases safe. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* The subprog returns 1 only when the range check succeeded,
	 * and the caller dereferences the spilled pkt pointer only when
	 * the return value is non-zero; the marking is safe here, so
	 * the program is accepted with retval 1.
	 */
	"calls: pkt_ptr spill into caller stack 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Marking is still kept and safe here. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 4",
	/* Like "stack 3", but the subprog does not read the spilled
	 * pointer back itself; the caller's guarded use relies on the
	 * spill marking being propagated across the call (ACCEPT).
	 */
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Check marking propagated. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 5",
	/* Caller pre-stores its ctx ptr (r1) in fp[-8]; the subprog
	 * overwrites the slot with a pkt_ptr only on the checked path.
	 * The caller's reload thus sees different pointer types on
	 * different paths, which the verifier must reject (see .errstr).
	 */
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	/* store ctx ptr into the slot before the call */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "same insn cannot be used with different",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 6",
	/* Caller pre-stores data_end in fp[-8]; on the path where the
	 * subprog does not overwrite the slot, the caller loads through
	 * a pkt_end pointer, which is rejected (see .errstr).
	 */
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	/* store pkt_end into the slot before the call */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 7",
	/* Like "stack 6", but the caller pre-stores scalar 0 in fp[-8];
	 * dereferencing the slot on the non-overwritten path is a load
	 * through a scalar and must be rejected (see .errstr).
	 */
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	/* store scalar 0 into the slot before the call */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 8",
	/* Caller range-checks the pkt_ptr itself before spilling it to
	 * fp[-8]; whether or not the subprog overwrites the slot (it only
	 * spills a checked pkt_ptr), the caller's load is safe (ACCEPT).
	 */
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* bail out unless [data, data + 8) is within the packet */
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	/* spill the already-checked pkt_ptr */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 9",
	/* Like "stack 8", but the subprog spills its pkt_ptr BEFORE the
	 * range check, clobbering the caller's checked spill with an
	 * unchecked one; the caller's subsequent use must be rejected
	 * (see .errstr).
	 */
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* bail out unless [data, data + 8) is within the packet */
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	/* spill the already-checked pkt_ptr */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: caller stack init to zero or map_value_or_null",
	/* Caller zero-initializes fp[-8]; the subprog either leaves it
	 * untouched (ctx == 0) or overwrites it with map_value_or_null.
	 * The caller's null check covers both cases, so the map_value
	 * store is safe (ACCEPT).
	 */
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	/* pass &fp[-8] to subprog in r2 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* fetch map_value_or_null or const_zero from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* store into map_value */
	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* if (ctx == 0) return; */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
	/* else bpf_map_lookup() and *(fp - 8) = r0 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 13 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"calls: stack init to zero and pruning",
	.insns = {
	/* first make allocated_stack 16 byte */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
	/* now fork the execution such that the false branch
	 * of JGT insn will be verified second and it skips zero
	 * init of fp-8 stack slot. If stack liveness marking
	 * is missing live_read marks from call map_lookup
	 * processing then pruning will incorrectly assume
	 * that fp-8 stack slot was unused in the fall-through
	 * branch and will accept the program incorrectly
	 */
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	/* both paths converge here; fp-8 is the lookup key ptr */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 7 },
	.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
	.result_unpriv = REJECT,
	/* in privileged mode reads from uninitialized stack locations are permitted */
	.result = ACCEPT,
},
{
	"calls: ctx read at start of subprog",
	/* The same subprog is called twice; it reads one byte through r1
	 * as its very first insn, so r1 must still be a valid ctx ptr at
	 * each call site.
	 */
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	/* self-compare with offset 0: effectively a no-op jump */
	BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
	/* restore ctx for the second call */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),

	/* subprog 1: reads a ctx byte right away */
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"calls: cross frame pruning",
	.insns = {
	/* r8 = !!random();
	 * call pruner()
	 * if (r8)
	 *   do something bad;
	 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	/* r1 is not readable after the call; pruning across the frame
	 * boundary must not hide this (see .errstr "!read_ok")
	 */
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	"calls: cross frame pruning - liveness propagation",
	.insns = {
	/* r8 = !!random() */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* r9 = !!random() */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	/* r2 is not readable after the call; liveness must propagate
	 * across frames so this is caught (see .errstr "!read_ok")
	 */
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.errstr = "!read_ok",
	.result = REJECT,
},
/* Make sure that verifier.c:states_equal() considers IDs from all
 * frames when building 'idmap' for check_ids().
 */
{
	"calls: check_ids() across call boundary",
	.insns = {
	/* Function main() */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	/* fp[-24] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1,
		      0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -24),
	/* fp[-32] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1,
		      0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -32),
	/* call foo(&fp[-24], &fp[-32]) ; both arguments have IDs in the current
	 *                              ; stack frame
	 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -24),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
	BPF_CALL_REL(2),
	/* exit 0 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* Function foo()
	 *
	 * r9 = &frame[0].fp[-24] ; save arguments in the callee saved registers,
	 * r8 = &frame[0].fp[-32] ; arguments are pointers to pointers to map value
	 */
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_2),
	/* r7 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	/* r6 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* if r6 > r7 goto +1 ; no new information about the state is derived from
	 *                    ; this check, thus produced verifier states differ
	 *                    ; only in 'insn_idx'
	 * r9 = r8
	 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
	/* r9 = *r9 ; verifier gets to this point via two paths:
	 *          ; (I) one including r9 = r8, verified first;
	 *          ; (II) one excluding r9 = r8, verified next.
	 *          ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id.
	 *          ; Suppose that checkpoint is created here via path (I).
	 *          ; When verifying via (II) the r9.id must be compared against
	 *          ; frame[0].fp[-24].id, otherwise (I) and (II) would be
	 *          ; incorrectly deemed equivalent.
	 * if r9 == 0 goto <exit>
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1),
	/* r8 = *r8 ; read map value via r8, this is not safe
	 * r0 = *r8 ; because r8 might be not equal to r9.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_8, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0),
	/* exit 0 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.flags = BPF_F_TEST_STATE_FREQ,
	.fixup_map_hash_8b = { 3, 9 },
	.result = REJECT,
	.errstr = "R8 invalid mem access 'map_value_or_null'",
	.result_unpriv = REJECT,
	.errstr_unpriv = "",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},