Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1{
2 "calls: invalid kfunc call not eliminated",
3 .insns = {
4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
5 BPF_MOV64_IMM(BPF_REG_0, 1),
6 BPF_EXIT_INSN(),
7 },
8 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9 .result = REJECT,
10 .errstr = "invalid kernel function call not eliminated in verifier pass",
11},
12{
13 "calls: invalid kfunc call unreachable",
14 .insns = {
15 BPF_MOV64_IMM(BPF_REG_0, 1),
16 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
17 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
18 BPF_MOV64_IMM(BPF_REG_0, 1),
19 BPF_EXIT_INSN(),
20 },
21 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
22 .result = ACCEPT,
23},
24{
25 "calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
26 .insns = {
27 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
28 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
29 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
30 BPF_EXIT_INSN(),
31 },
32 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
33 .result = REJECT,
34 .errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
35 .fixup_kfunc_btf_id = {
36 { "bpf_kfunc_call_test_fail1", 2 },
37 },
38},
39{
40 "calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
41 .insns = {
42 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
43 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
44 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
45 BPF_EXIT_INSN(),
46 },
47 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
48 .result = REJECT,
49 .errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
50 .fixup_kfunc_btf_id = {
51 { "bpf_kfunc_call_test_fail2", 2 },
52 },
53},
54{
55 "calls: invalid kfunc call: ptr_to_mem to struct with FAM",
56 .insns = {
57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
58 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
59 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
60 BPF_EXIT_INSN(),
61 },
62 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
63 .result = REJECT,
64 .errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
65 .fixup_kfunc_btf_id = {
66 { "bpf_kfunc_call_test_fail3", 2 },
67 },
68},
69{
70 "calls: invalid kfunc call: reg->type != PTR_TO_CTX",
71 .insns = {
72 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
73 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
74 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
75 BPF_EXIT_INSN(),
76 },
77 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
78 .result = REJECT,
79 .errstr = "arg#0 expected pointer to ctx, but got fp",
80 .fixup_kfunc_btf_id = {
81 { "bpf_kfunc_call_test_pass_ctx", 2 },
82 },
83},
84{
85 "calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
86 .insns = {
87 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
89 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
90 BPF_EXIT_INSN(),
91 },
92 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
93 .result = REJECT,
94 .errstr = "arg#0 pointer type UNKNOWN must point to scalar",
95 .fixup_kfunc_btf_id = {
96 { "bpf_kfunc_call_test_mem_len_fail1", 2 },
97 },
98},
99{
100 "calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
101 .insns = {
102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
104 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
108 BPF_EXIT_INSN(),
109 },
110 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
111 .result = REJECT,
112 .errstr = "Possibly NULL pointer passed to trusted arg0",
113 .fixup_kfunc_btf_id = {
114 { "bpf_kfunc_call_test_acquire", 3 },
115 { "bpf_kfunc_call_test_release", 5 },
116 },
117},
118{
119 "calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
120 .insns = {
121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
123 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
125 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
126 BPF_EXIT_INSN(),
127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
130 BPF_MOV64_IMM(BPF_REG_0, 0),
131 BPF_EXIT_INSN(),
132 },
133 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
134 .result = REJECT,
135 .errstr = "R1 must have zero offset when passed to release func",
136 .fixup_kfunc_btf_id = {
137 { "bpf_kfunc_call_test_acquire", 3 },
138 { "bpf_kfunc_call_memb_release", 8 },
139 },
140},
141{
142 "calls: invalid kfunc call: don't match first member type when passed to release kfunc",
143 .insns = {
144 BPF_MOV64_IMM(BPF_REG_0, 0),
145 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
146 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
147 BPF_EXIT_INSN(),
148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
150 BPF_MOV64_IMM(BPF_REG_0, 0),
151 BPF_EXIT_INSN(),
152 },
153 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
154 .result = REJECT,
155 .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
156 .fixup_kfunc_btf_id = {
157 { "bpf_kfunc_call_memb_acquire", 1 },
158 { "bpf_kfunc_call_memb1_release", 5 },
159 },
160},
161{
162 "calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
163 .insns = {
164 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
166 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
168 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
169 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
170 BPF_EXIT_INSN(),
171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
174 BPF_MOV64_IMM(BPF_REG_0, 0),
175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
176 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
177 BPF_MOV64_IMM(BPF_REG_0, 0),
178 BPF_EXIT_INSN(),
179 },
180 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
181 .fixup_kfunc_btf_id = {
182 { "bpf_kfunc_call_test_acquire", 3 },
183 { "bpf_kfunc_call_test_offset", 9 },
184 { "bpf_kfunc_call_test_release", 12 },
185 },
186 .result_unpriv = REJECT,
187 .result = REJECT,
188 .errstr = "ptr R1 off=-4 disallowed",
189},
190{
191 "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
192 .insns = {
193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
195 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
197 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
198 BPF_EXIT_INSN(),
199 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
200 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
201 BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
202 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
203 BPF_MOV64_IMM(BPF_REG_0, 0),
204 BPF_EXIT_INSN(),
205 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
207 BPF_MOV64_IMM(BPF_REG_0, 0),
208 BPF_EXIT_INSN(),
209 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
211 BPF_MOV64_IMM(BPF_REG_0, 0),
212 BPF_EXIT_INSN(),
213 },
214 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
215 .fixup_kfunc_btf_id = {
216 { "bpf_kfunc_call_test_acquire", 3 },
217 { "bpf_kfunc_call_test_release", 9 },
218 { "bpf_kfunc_call_test_release", 13 },
219 { "bpf_kfunc_call_test_release", 17 },
220 },
221 .result_unpriv = REJECT,
222 .result = REJECT,
223 .errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
224},
225{
226 "calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
227 .insns = {
228 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
230 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
231 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
232 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
233 BPF_EXIT_INSN(),
234 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
237 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16),
238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
239 BPF_MOV64_IMM(BPF_REG_0, 0),
240 BPF_EXIT_INSN(),
241 },
242 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
243 .fixup_kfunc_btf_id = {
244 { "bpf_kfunc_call_test_acquire", 3 },
245 { "bpf_kfunc_call_test_ref", 8 },
246 { "bpf_kfunc_call_test_ref", 10 },
247 },
248 .result_unpriv = REJECT,
249 .result = REJECT,
250 .errstr = "R1 must be",
251},
252{
253 "calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
254 .insns = {
255 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
257 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
258 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
259 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
260 BPF_EXIT_INSN(),
261 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
262 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
266 BPF_MOV64_IMM(BPF_REG_0, 0),
267 BPF_EXIT_INSN(),
268 },
269 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
270 .fixup_kfunc_btf_id = {
271 { "bpf_kfunc_call_test_acquire", 3 },
272 { "bpf_kfunc_call_test_ref", 8 },
273 { "bpf_kfunc_call_test_release", 10 },
274 },
275 .result_unpriv = REJECT,
276 .result = ACCEPT,
277},
278{
279 "calls: invalid kfunc call: must provide (attach_prog_fd, btf_id) pair when freplace",
280 .insns = {
281 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
282 BPF_EXIT_INSN(),
283 },
284 .prog_type = BPF_PROG_TYPE_EXT,
285 .result = REJECT,
286 .errstr = "Tracing programs must provide btf_id",
287 .fixup_kfunc_btf_id = {
288 { "bpf_dynptr_from_skb", 0 },
289 },
290},
291{
292 "calls: basic sanity",
293 .insns = {
294 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
295 BPF_MOV64_IMM(BPF_REG_0, 1),
296 BPF_EXIT_INSN(),
297 BPF_MOV64_IMM(BPF_REG_0, 2),
298 BPF_EXIT_INSN(),
299 },
300 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
301 .result = ACCEPT,
302},
303{
304 "calls: not on unprivileged",
305 .insns = {
306 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
307 BPF_MOV64_IMM(BPF_REG_0, 1),
308 BPF_EXIT_INSN(),
309 BPF_MOV64_IMM(BPF_REG_0, 2),
310 BPF_EXIT_INSN(),
311 },
312 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
313 .result_unpriv = REJECT,
314 .result = ACCEPT,
315 .retval = 1,
316},
317{
318 "calls: div by 0 in subprog",
319 .insns = {
320 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
321 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
322 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
323 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
324 offsetof(struct __sk_buff, data_end)),
325 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
327 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
328 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
329 BPF_MOV64_IMM(BPF_REG_0, 1),
330 BPF_EXIT_INSN(),
331 BPF_MOV32_IMM(BPF_REG_2, 0),
332 BPF_MOV32_IMM(BPF_REG_3, 1),
333 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
334 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
335 offsetof(struct __sk_buff, data)),
336 BPF_EXIT_INSN(),
337 },
338 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
339 .result = ACCEPT,
340 .retval = 1,
341},
342{
343 "calls: multiple ret types in subprog 1",
344 .insns = {
345 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
348 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
349 offsetof(struct __sk_buff, data_end)),
350 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
351 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
352 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
353 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
354 BPF_MOV64_IMM(BPF_REG_0, 1),
355 BPF_EXIT_INSN(),
356 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
357 offsetof(struct __sk_buff, data)),
358 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
359 BPF_MOV32_IMM(BPF_REG_0, 42),
360 BPF_EXIT_INSN(),
361 },
362 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
363 .result = REJECT,
364 .errstr = "R0 invalid mem access 'scalar'",
365},
366{
367 "calls: multiple ret types in subprog 2",
368 .insns = {
369 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
371 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
372 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
373 offsetof(struct __sk_buff, data_end)),
374 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
375 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
376 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
377 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
378 BPF_MOV64_IMM(BPF_REG_0, 1),
379 BPF_EXIT_INSN(),
380 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
381 offsetof(struct __sk_buff, data)),
382 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
383 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
384 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
385 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
387 BPF_LD_MAP_FD(BPF_REG_1, 0),
388 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
389 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
391 offsetof(struct __sk_buff, data)),
392 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
393 BPF_EXIT_INSN(),
394 },
395 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
396 .fixup_map_hash_8b = { 16 },
397 .result = REJECT,
398 .errstr = "R0 min value is outside of the allowed memory range",
399},
400{
401 "calls: overlapping caller/callee",
402 .insns = {
403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
404 BPF_MOV64_IMM(BPF_REG_0, 1),
405 BPF_EXIT_INSN(),
406 },
407 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
408 .errstr = "last insn is not an exit or jmp",
409 .result = REJECT,
410},
411{
412 "calls: wrong recursive calls",
413 .insns = {
414 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
415 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
417 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
418 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
419 BPF_MOV64_IMM(BPF_REG_0, 1),
420 BPF_EXIT_INSN(),
421 },
422 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
423 .errstr = "jump out of range",
424 .result = REJECT,
425},
426{
427 "calls: wrong src reg",
428 .insns = {
429 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
430 BPF_MOV64_IMM(BPF_REG_0, 1),
431 BPF_EXIT_INSN(),
432 },
433 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
434 .errstr = "BPF_CALL uses reserved fields",
435 .result = REJECT,
436},
437{
438 "calls: wrong off value",
439 .insns = {
440 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
441 BPF_MOV64_IMM(BPF_REG_0, 1),
442 BPF_EXIT_INSN(),
443 BPF_MOV64_IMM(BPF_REG_0, 2),
444 BPF_EXIT_INSN(),
445 },
446 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
447 .errstr = "BPF_CALL uses reserved fields",
448 .result = REJECT,
449},
450{
451 "calls: jump back loop",
452 .insns = {
453 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
454 BPF_MOV64_IMM(BPF_REG_0, 1),
455 BPF_EXIT_INSN(),
456 },
457 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
458 .errstr = "the call stack of 9 frames is too deep",
459 .result = REJECT,
460},
461{
462 "calls: conditional call",
463 .insns = {
464 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
465 offsetof(struct __sk_buff, mark)),
466 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
467 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
468 BPF_MOV64_IMM(BPF_REG_0, 1),
469 BPF_EXIT_INSN(),
470 BPF_MOV64_IMM(BPF_REG_0, 2),
471 BPF_EXIT_INSN(),
472 },
473 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
474 .errstr = "jump out of range",
475 .result = REJECT,
476},
477{
478 "calls: conditional call 2",
479 .insns = {
480 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
481 offsetof(struct __sk_buff, mark)),
482 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
483 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
484 BPF_MOV64_IMM(BPF_REG_0, 1),
485 BPF_EXIT_INSN(),
486 BPF_MOV64_IMM(BPF_REG_0, 2),
487 BPF_EXIT_INSN(),
488 BPF_MOV64_IMM(BPF_REG_0, 3),
489 BPF_EXIT_INSN(),
490 },
491 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
492 .result = ACCEPT,
493},
494{
495 "calls: conditional call 3",
496 .insns = {
497 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
498 offsetof(struct __sk_buff, mark)),
499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
500 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
501 BPF_MOV64_IMM(BPF_REG_0, 1),
502 BPF_EXIT_INSN(),
503 BPF_MOV64_IMM(BPF_REG_0, 1),
504 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
505 BPF_MOV64_IMM(BPF_REG_0, 3),
506 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
507 },
508 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
509 .errstr_unpriv = "back-edge from insn",
510 .result_unpriv = REJECT,
511 .result = ACCEPT,
512 .retval = 1,
513},
514{
515 "calls: conditional call 4",
516 .insns = {
517 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
518 offsetof(struct __sk_buff, mark)),
519 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
521 BPF_MOV64_IMM(BPF_REG_0, 1),
522 BPF_EXIT_INSN(),
523 BPF_MOV64_IMM(BPF_REG_0, 1),
524 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
525 BPF_MOV64_IMM(BPF_REG_0, 3),
526 BPF_EXIT_INSN(),
527 },
528 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
529 .result = ACCEPT,
530},
531{
532 "calls: conditional call 5",
533 .insns = {
534 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
535 offsetof(struct __sk_buff, mark)),
536 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
538 BPF_MOV64_IMM(BPF_REG_0, 1),
539 BPF_EXIT_INSN(),
540 BPF_MOV64_IMM(BPF_REG_0, 1),
541 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
542 BPF_MOV64_IMM(BPF_REG_0, 3),
543 BPF_EXIT_INSN(),
544 },
545 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
546 .result = ACCEPT,
547 .retval = 1,
548},
549{
550 "calls: conditional call 6",
551 .insns = {
552 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
553 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
554 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
555 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
556 BPF_EXIT_INSN(),
557 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
558 offsetof(struct __sk_buff, mark)),
559 BPF_EXIT_INSN(),
560 },
561 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
562 .errstr = "infinite loop detected",
563 .result = REJECT,
564},
565{
566 "calls: using r0 returned by callee",
567 .insns = {
568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
569 BPF_EXIT_INSN(),
570 BPF_MOV64_IMM(BPF_REG_0, 2),
571 BPF_EXIT_INSN(),
572 },
573 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
574 .result = ACCEPT,
575},
576{
577 "calls: using uninit r0 from callee",
578 .insns = {
579 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
580 BPF_EXIT_INSN(),
581 BPF_EXIT_INSN(),
582 },
583 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
584 .errstr = "!read_ok",
585 .result = REJECT,
586},
587{
588 "calls: callee is using r1",
589 .insns = {
590 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
591 BPF_EXIT_INSN(),
592 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
593 offsetof(struct __sk_buff, len)),
594 BPF_EXIT_INSN(),
595 },
596 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
597 .result = ACCEPT,
598 .retval = TEST_DATA_LEN,
599},
600{
601 "calls: callee using args1",
602 .insns = {
603 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
604 BPF_EXIT_INSN(),
605 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
606 BPF_EXIT_INSN(),
607 },
608 .errstr_unpriv = "allowed for",
609 .result_unpriv = REJECT,
610 .result = ACCEPT,
611 .retval = POINTER_VALUE,
612},
613{
614 "calls: callee using wrong args2",
615 .insns = {
616 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
617 BPF_EXIT_INSN(),
618 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
619 BPF_EXIT_INSN(),
620 },
621 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
622 .errstr = "R2 !read_ok",
623 .result = REJECT,
624},
625{
626 "calls: callee using two args",
627 .insns = {
628 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
629 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
630 offsetof(struct __sk_buff, len)),
631 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
632 offsetof(struct __sk_buff, len)),
633 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
634 BPF_EXIT_INSN(),
635 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
636 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
637 BPF_EXIT_INSN(),
638 },
639 .errstr_unpriv = "allowed for",
640 .result_unpriv = REJECT,
641 .result = ACCEPT,
642 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
643},
644{
645 "calls: callee changing pkt pointers",
646 .insns = {
647 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
648 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
649 offsetof(struct xdp_md, data_end)),
650 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
652 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
654 /* clear_all_pkt_pointers() has to walk all frames
655 * to make sure that pkt pointers in the caller
656 * are cleared when callee is calling a helper that
657 * adjusts packet size
658 */
659 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
660 BPF_MOV32_IMM(BPF_REG_0, 0),
661 BPF_EXIT_INSN(),
662 BPF_MOV64_IMM(BPF_REG_2, 0),
663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
664 BPF_EXIT_INSN(),
665 },
666 .result = REJECT,
667 .errstr = "R6 invalid mem access 'scalar'",
668 .prog_type = BPF_PROG_TYPE_XDP,
669 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
670},
671{
672 "calls: ptr null check in subprog",
673 .insns = {
674 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
675 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
677 BPF_LD_MAP_FD(BPF_REG_1, 0),
678 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
679 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
680 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
683 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
684 BPF_EXIT_INSN(),
685 BPF_MOV64_IMM(BPF_REG_0, 0),
686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
687 BPF_MOV64_IMM(BPF_REG_0, 1),
688 BPF_EXIT_INSN(),
689 },
690 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
691 .fixup_map_hash_48b = { 3 },
692 .result_unpriv = REJECT,
693 .result = ACCEPT,
694 .retval = 0,
695},
696{
697 "calls: two calls with args",
698 .insns = {
699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
700 BPF_EXIT_INSN(),
701 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
703 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
705 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
706 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
708 BPF_EXIT_INSN(),
709 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
710 offsetof(struct __sk_buff, len)),
711 BPF_EXIT_INSN(),
712 },
713 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
714 .result = ACCEPT,
715 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
716},
717{
718 "calls: calls with stack arith",
719 .insns = {
720 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
722 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
723 BPF_EXIT_INSN(),
724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
725 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
726 BPF_EXIT_INSN(),
727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
728 BPF_MOV64_IMM(BPF_REG_0, 42),
729 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
730 BPF_EXIT_INSN(),
731 },
732 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
733 .result = ACCEPT,
734 .retval = 42,
735},
736{
737 "calls: calls with misaligned stack access",
738 .insns = {
739 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
741 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
742 BPF_EXIT_INSN(),
743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
745 BPF_EXIT_INSN(),
746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
747 BPF_MOV64_IMM(BPF_REG_0, 42),
748 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
749 BPF_EXIT_INSN(),
750 },
751 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
752 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
753 .errstr = "misaligned stack access",
754 .result = REJECT,
755},
756{
757 "calls: calls control flow, jump test",
758 .insns = {
759 BPF_MOV64_IMM(BPF_REG_0, 42),
760 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
761 BPF_MOV64_IMM(BPF_REG_0, 43),
762 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
763 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
764 BPF_EXIT_INSN(),
765 },
766 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
767 .result = ACCEPT,
768 .retval = 43,
769},
770{
771 "calls: calls control flow, jump test 2",
772 .insns = {
773 BPF_MOV64_IMM(BPF_REG_0, 42),
774 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
775 BPF_MOV64_IMM(BPF_REG_0, 43),
776 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
777 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
778 BPF_EXIT_INSN(),
779 },
780 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
781 .errstr = "jump out of range from insn 1 to 4",
782 .result = REJECT,
783},
784{
785 "calls: two calls with bad jump",
786 .insns = {
787 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
788 BPF_EXIT_INSN(),
789 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
790 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
791 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
794 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
795 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
796 BPF_EXIT_INSN(),
797 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
798 offsetof(struct __sk_buff, len)),
799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
800 BPF_EXIT_INSN(),
801 },
802 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
803 .errstr = "jump out of range from insn 11 to 9",
804 .result = REJECT,
805},
806{
807 "calls: recursive call. test1",
808 .insns = {
809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
810 BPF_EXIT_INSN(),
811 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
812 BPF_EXIT_INSN(),
813 },
814 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
815 .errstr = "the call stack of 9 frames is too deep",
816 .result = REJECT,
817},
818{
819 "calls: recursive call. test2",
820 .insns = {
821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
822 BPF_EXIT_INSN(),
823 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
824 BPF_EXIT_INSN(),
825 },
826 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
827 .errstr = "the call stack of 9 frames is too deep",
828 .result = REJECT,
829},
830{
831 "calls: unreachable code",
832 .insns = {
833 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
834 BPF_EXIT_INSN(),
835 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
836 BPF_EXIT_INSN(),
837 BPF_MOV64_IMM(BPF_REG_0, 0),
838 BPF_EXIT_INSN(),
839 BPF_MOV64_IMM(BPF_REG_0, 0),
840 BPF_EXIT_INSN(),
841 },
842 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
843 .errstr = "unreachable insn 6",
844 .result = REJECT,
845},
846{
847 "calls: invalid call",
848 .insns = {
849 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
850 BPF_EXIT_INSN(),
851 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
852 BPF_EXIT_INSN(),
853 },
854 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
855 .errstr = "invalid destination",
856 .result = REJECT,
857},
858{
859 "calls: invalid call 2",
860 .insns = {
861 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
862 BPF_EXIT_INSN(),
863 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
864 BPF_EXIT_INSN(),
865 },
866 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
867 .errstr = "invalid destination",
868 .result = REJECT,
869},
870{
871 "calls: jumping across function bodies. test1",
872 .insns = {
873 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
874 BPF_MOV64_IMM(BPF_REG_0, 0),
875 BPF_EXIT_INSN(),
876 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
877 BPF_EXIT_INSN(),
878 },
879 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
880 .errstr = "jump out of range",
881 .result = REJECT,
882},
883{
884 "calls: jumping across function bodies. test2",
885 .insns = {
886 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
887 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
888 BPF_MOV64_IMM(BPF_REG_0, 0),
889 BPF_EXIT_INSN(),
890 BPF_EXIT_INSN(),
891 },
892 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
893 .errstr = "jump out of range",
894 .result = REJECT,
895},
896{
897 "calls: call without exit",
898 .insns = {
899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
900 BPF_EXIT_INSN(),
901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
902 BPF_EXIT_INSN(),
903 BPF_MOV64_IMM(BPF_REG_0, 0),
904 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
905 },
906 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
907 .errstr = "not an exit",
908 .result = REJECT,
909},
910{
911 "calls: call into middle of ld_imm64",
912 .insns = {
913 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
914 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
915 BPF_MOV64_IMM(BPF_REG_0, 0),
916 BPF_EXIT_INSN(),
917 BPF_LD_IMM64(BPF_REG_0, 0),
918 BPF_EXIT_INSN(),
919 },
920 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
921 .errstr = "last insn",
922 .result = REJECT,
923},
924{
925 "calls: call into middle of other call",
926 .insns = {
927 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
929 BPF_MOV64_IMM(BPF_REG_0, 0),
930 BPF_EXIT_INSN(),
931 BPF_MOV64_IMM(BPF_REG_0, 0),
932 BPF_MOV64_IMM(BPF_REG_0, 0),
933 BPF_EXIT_INSN(),
934 },
935 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
936 .errstr = "last insn",
937 .result = REJECT,
938},
939{
940 "calls: subprog call with ld_abs in main prog",
941 .insns = {
942 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
943 BPF_LD_ABS(BPF_B, 0),
944 BPF_LD_ABS(BPF_H, 0),
945 BPF_LD_ABS(BPF_W, 0),
946 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
947 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
948 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
949 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
950 BPF_LD_ABS(BPF_B, 0),
951 BPF_LD_ABS(BPF_H, 0),
952 BPF_LD_ABS(BPF_W, 0),
953 BPF_EXIT_INSN(),
954 BPF_MOV64_IMM(BPF_REG_2, 1),
955 BPF_MOV64_IMM(BPF_REG_3, 2),
956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
957 BPF_EXIT_INSN(),
958 },
959 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
960 .result = ACCEPT,
961},
962{
963 "calls: two calls with bad fallthrough",
964 .insns = {
965 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
966 BPF_EXIT_INSN(),
967 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
968 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
969 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
970 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
971 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
972 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
973 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
974 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
975 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
976 offsetof(struct __sk_buff, len)),
977 BPF_EXIT_INSN(),
978 },
979 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
980 .errstr = "not an exit",
981 .result = REJECT,
982},
983{
984 "calls: two calls with stack read",
985 .insns = {
986 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
987 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
989 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
990 BPF_EXIT_INSN(),
991 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
993 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
994 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
995 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
996 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
997 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
998 BPF_EXIT_INSN(),
999 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
1000 BPF_EXIT_INSN(),
1001 },
1002 .prog_type = BPF_PROG_TYPE_XDP,
1003 .result = ACCEPT,
1004},
{
	"calls: two calls with stack write",
	.insns = {
	/* main prog: pass fp-8 in r1 and fp-16 in r2, then read back
	 * the value the callee wrote at fp-16
	 */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
	/* write into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* read from stack frame of main prog */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: stack overflow using two frames (pre-call access)",
	.insns = {
	/* prog 1: touch fp-300 before calling, so its frame needs 300 bytes */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* prog 2: another 300-byte frame; 300 + 300 exceeds the 512-byte
	 * combined stack limit
	 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	"calls: stack overflow using two frames (post-call access)",
	.insns = {
	/* prog 1: same as the pre-call variant, but the 300-byte access
	 * happens after the call returns; depth accounting must still
	 * reject the combined 300 + 300 usage
	 */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),

	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	"calls: stack depth check using three frames. test1",
	.insns = {
	/* main: calls A directly and B (which calls A again) */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=256, stack_B=64
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	"calls: stack depth check using three frames. test2",
	.insns = {
	/* main: same call graph as test1, but with A/B stack sizes swapped */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=64, stack_B=256
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	"calls: stack depth check using three frames. test3",
	.insns = {
	/* main: like test1/test2 but with conditional control flow
	 * inside the subprogs; per-subprog max stack usage must still
	 * be summed along the deepest call chain
	 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	/* B */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=64, stack_A=224, stack_B=256
	 * and max(main+A, main+A+B) > 512
	 */
	.errstr = "combined stack",
	.result = REJECT,
},
{
	"calls: stack depth check using three frames. test4",
	/* void main(void) {
	 *   func1(0);
	 *   func1(1);
	 *   func2(1);
	 * }
	 * void func1(int alloc_or_recurse) {
	 *   if (alloc_or_recurse) {
	 *     frame_pointer[-300] = 1;
	 *   } else {
	 *     func2(alloc_or_recurse);
	 *   }
	 * }
	 * void func2(int alloc_or_recurse) {
	 *   if (alloc_or_recurse) {
	 *     frame_pointer[-300] = 1;
	 *   }
	 * }
	 *
	 * REJECT: each subprog is charged its max stack usage (300 for
	 * both func1 and func2), so the chain main->func1->func2 is
	 * counted as 600 bytes even though the allocating and recursing
	 * branches are mutually exclusive.
	 */
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "combined stack",
},
{
	"calls: stack depth check using three frames. test5",
	.insns = {
	/* main -> A -> B -> ... -> H: a linear call chain deep enough to
	 * exceed the verifier's maximum call frame depth
	 */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: stack depth check in dead code",
	.insns = {
	/* main: r1 = 0, so A's branch skips the call to B at runtime;
	 * the verifier must still account the chain A->B->...->H and
	 * reject for call stack depth even though it is dead code
	 */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: spill into caller stack frame",
	.insns = {
	/* main: pass fp-8 to the subprog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog: try to spill a pointer (r1, which points into the
	 * caller's stack) through that same caller-stack pointer --
	 * spilling pointers into another frame is refused
	 */
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot spill",
	.result = REJECT,
},
{
	"calls: write into caller stack frame",
	.insns = {
	/* main: pass fp-8 to the subprog, then read the value it wrote */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	/* subprog: scalar store through the caller-stack pointer is
	 * allowed; main then returns the stored 42
	 */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
	.retval = 42,
},
{
	"calls: write into callee stack frame",
	.insns = {
	/* main: dereference whatever the subprog returned */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
	/* subprog: return a pointer into its own (dead after return)
	 * stack frame -- must be rejected
	 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot return stack pointer",
	.result = REJECT,
},
{
	"calls: two calls with stack write and void return",
	.insns = {
	/* main prog: pass fp-8 (r1) and fp-16 (r2), then read fp-16 */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1: invoke subprog 2 once per caller-stack pointer */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* write into stack frame of main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(), /* void return */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: ambiguous return value",
	.insns = {
	/* main: use r0 after calling a subprog that sets r0 on only one
	 * of its paths, so r0 may be unreadable at the second call /
	 * final exit
	 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	/* subprog: r0 is written only when r1 == 0; the r1 != 0 path
	 * exits with r0 untouched
	 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	/* NOTE(review): unpriv rejection presumably because bpf-to-bpf
	 * calls are a privileged feature -- message ends in "allowed for"
	 */
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
{
	"calls: two calls that return map_value",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),

	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* 23 = insn index of the BPF_LD_MAP_FD to patch */
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	"calls: two calls that return map_value with bool condition",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1: subprog 2 returns 1 iff the lookup succeeded, so the
	 * stored pointer is dereferenced only behind that bool check
	 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	"calls: two calls that return map_value with incorrect bool check",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* BUG under test: checks r0 != 0 instead of r0 != 1, i.e. it
	 * dereferences fp-16 exactly when subprog 2 reported failure
	 * and never wrote a pointer there
	 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
	.result_unpriv = REJECT,
	.errstr_unpriv = "invalid read from stack R7 off=-16 size=8",
},
{
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value at off=2: 2 + 8 > value_size of 8,
	 * hence REJECT below
	 */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
	/* same shape as test1, but subprog 2 writes at off=0, which fits
	 * in the 8-byte map value -> ACCEPT
	 */
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
	/* like test1, but the "subprogs" are reached via plain jumps in a
	 * single frame; same out-of-bounds off=2 write -> REJECT
	 */
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0), // 26
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
	BPF_JMP_IMM(BPF_JA, 0, 0, -30),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value at off=2: out of bounds for 8-byte value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
	/* here the raw lookup result (ptr_or_null) is stored into the
	 * caller's stack BEFORE the null check; the flag in r8/r9 still
	 * correctly tracks null-ness, so the guarded deref is ACCEPTed
	 */
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
	/* same as test1, except subprog 2 uses the WRONG flag polarity on
	 * arg4 (== 0 instead of == 1): it dereferences arg3 exactly when
	 * the lookup failed -> REJECT
	 */
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 0 do *arg3 = 0 (BUG under test: inverted check) */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
},
{
	"calls: pkt_ptr spill into caller stack",
	.insns = {
	/* main: pass fp-8 in r4 as spill slot for the subprog */
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* Marking is still kept, but not in all cases safe: the caller
	 * dereferences the spilled pkt_ptr unconditionally, including
	 * on the path where the subprog's range check failed.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* subprog returns 1 only when the pkt range was verified;
	 * caller gates the deref of the spilled ptr on that flag
	 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Marking is still kept and safe here. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
/* Same flag-guarded pattern as test 3, except the subprog never reads the
 * spilled pkt_ptr back itself; the range-check marking must still propagate
 * through the caller's stack slot, so the caller's guarded write is accepted.
 */
1974{
1975	"calls: pkt_ptr spill into caller stack 4",
1976	.insns = {
1977	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1978	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1979	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1980	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1981	/* Check marking propagated. */
1982	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1983	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1984	BPF_EXIT_INSN(),
1985
1986	/* subprog 1 */
1987	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1988		    offsetof(struct __sk_buff, data)),
1989	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1990		    offsetof(struct __sk_buff, data_end)),
1991	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1992	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1993	/* spill unchecked pkt_ptr into stack of caller */
1994	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1995	BPF_MOV64_IMM(BPF_REG_5, 0),
1996	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1997	BPF_MOV64_IMM(BPF_REG_5, 1),
1998	/* don't read back pkt_ptr from stack here */
1999	/* write 4 bytes into packet */
2000	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2001	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2002	BPF_EXIT_INSN(),
2003	},
2004	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2005	.result = ACCEPT,
2006	.retval = 1,
2007	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2008},
/* The caller first spills the ctx pointer (r1) into the slot; the subprog
 * conditionally overwrites the same slot with a pkt_ptr.  The same store
 * insn would handle two different pointer types, which must be rejected.
 */
2009{
2010	"calls: pkt_ptr spill into caller stack 5",
2011	.insns = {
2012	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2013	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2014	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
2015	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2016	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2017	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2018	BPF_EXIT_INSN(),
2019
2020	/* subprog 1 */
2021	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2022		    offsetof(struct __sk_buff, data)),
2023	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2024		    offsetof(struct __sk_buff, data_end)),
2025	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2026	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2027	BPF_MOV64_IMM(BPF_REG_5, 0),
2028	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2029	/* spill checked pkt_ptr into stack of caller */
2030	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2031	BPF_MOV64_IMM(BPF_REG_5, 1),
2032	/* don't read back pkt_ptr from stack here */
2033	/* write 4 bytes into packet */
2034	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2035	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2036	BPF_EXIT_INSN(),
2037	},
2038	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2039	.errstr = "same insn cannot be used with different",
2040	.result = REJECT,
2041	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2042},
/* The caller pre-fills the slot with pkt_end; the subprog only overwrites it
 * with a checked pkt_ptr on one path.  On the other path the caller loads
 * pkt_end and dereferences it, so the load through r4 must be rejected.
 */
2043{
2044	"calls: pkt_ptr spill into caller stack 6",
2045	.insns = {
2046	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2047		    offsetof(struct __sk_buff, data_end)),
2048	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2049	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2050	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2051	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2052	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2053	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2054	BPF_EXIT_INSN(),
2055
2056	/* subprog 1 */
2057	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2058		    offsetof(struct __sk_buff, data)),
2059	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2060		    offsetof(struct __sk_buff, data_end)),
2061	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2062	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2063	BPF_MOV64_IMM(BPF_REG_5, 0),
2064	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2065	/* spill checked pkt_ptr into stack of caller */
2066	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2067	BPF_MOV64_IMM(BPF_REG_5, 1),
2068	/* don't read back pkt_ptr from stack here */
2069	/* write 4 bytes into packet */
2070	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2071	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2072	BPF_EXIT_INSN(),
2073	},
2074	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2075	.errstr = "R4 invalid mem access",
2076	.result = REJECT,
2077	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2078},
/* Variant of test 6 with the slot pre-filled with scalar 0 instead of
 * pkt_end: on the not-overwritten path the caller dereferences a scalar,
 * so the load through r4 must again be rejected.
 */
2079{
2080	"calls: pkt_ptr spill into caller stack 7",
2081	.insns = {
2082	BPF_MOV64_IMM(BPF_REG_2, 0),
2083	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2084	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2085	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2086	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2087	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2088	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2089	BPF_EXIT_INSN(),
2090
2091	/* subprog 1 */
2092	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2093		    offsetof(struct __sk_buff, data)),
2094	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2095		    offsetof(struct __sk_buff, data_end)),
2096	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2097	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2098	BPF_MOV64_IMM(BPF_REG_5, 0),
2099	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2100	/* spill checked pkt_ptr into stack of caller */
2101	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2102	BPF_MOV64_IMM(BPF_REG_5, 1),
2103	/* don't read back pkt_ptr from stack here */
2104	/* write 4 bytes into packet */
2105	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2106	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2107	BPF_EXIT_INSN(),
2108	},
2109	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2110	.errstr = "R4 invalid mem access",
2111	.result = REJECT,
2112	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2113},
/* The caller spills a pkt_ptr it has already range-checked itself; whether
 * or not the subprog overwrites the slot, the caller's load yields a
 * checked pkt_ptr, so the program is accepted.
 */
2114{
2115	"calls: pkt_ptr spill into caller stack 8",
2116	.insns = {
2117	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2118		    offsetof(struct __sk_buff, data)),
2119	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2120		    offsetof(struct __sk_buff, data_end)),
2121	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2122	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2123	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
2124	BPF_EXIT_INSN(),
2125	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2126	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2127	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2128	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2129	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2130	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2131	BPF_EXIT_INSN(),
2132
2133	/* subprog 1 */
2134	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2135		    offsetof(struct __sk_buff, data)),
2136	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2137		    offsetof(struct __sk_buff, data_end)),
2138	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2139	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2140	BPF_MOV64_IMM(BPF_REG_5, 0),
2141	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2142	/* spill checked pkt_ptr into stack of caller */
2143	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2144	BPF_MOV64_IMM(BPF_REG_5, 1),
2145	/* don't read back pkt_ptr from stack here */
2146	/* write 4 bytes into packet */
2147	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2148	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2149	BPF_EXIT_INSN(),
2150	},
2151	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2152	.result = ACCEPT,
2153	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2154},
/* Like test 8, but the subprog spills its own pkt_ptr BEFORE the range
 * check: on the failed-check path the caller's slot holds an unchecked
 * pkt_ptr, so the caller's dereference must be rejected.
 */
2155{
2156	"calls: pkt_ptr spill into caller stack 9",
2157	.insns = {
2158	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2159		    offsetof(struct __sk_buff, data)),
2160	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2161		    offsetof(struct __sk_buff, data_end)),
2162	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2163	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2164	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
2165	BPF_EXIT_INSN(),
2166	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2167	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2168	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2169	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2170	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2171	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2172	BPF_EXIT_INSN(),
2173
2174	/* subprog 1 */
2175	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2176		    offsetof(struct __sk_buff, data)),
2177	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2178		    offsetof(struct __sk_buff, data_end)),
2179	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2180	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2181	BPF_MOV64_IMM(BPF_REG_5, 0),
2182	/* spill unchecked pkt_ptr into stack of caller */
2183	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2184	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2185	BPF_MOV64_IMM(BPF_REG_5, 1),
2186	/* don't read back pkt_ptr from stack here */
2187	/* write 4 bytes into packet */
2188	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2189	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2190	BPF_EXIT_INSN(),
2191	},
2192	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2193	.errstr = "invalid access to packet",
2194	.result = REJECT,
2195	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2196},
/* The caller's fp-8 slot is either the initial zero or a
 * map_value_or_null written by the subprog; because the caller
 * null-checks the loaded value before storing through it, the
 * verifier must accept both possibilities.
 */
2197{
2198	"calls: caller stack init to zero or map_value_or_null",
2199	.insns = {
2200	BPF_MOV64_IMM(BPF_REG_0, 0),
2201	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2202	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2203	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2204	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2205	/* fetch map_value_or_null or const_zero from stack */
2206	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2207	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2208	/* store into map_value */
2209	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
2210	BPF_EXIT_INSN(),
2211
2212	/* subprog 1 */
2213	/* if (ctx == 0) return; */
2214	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
2215	/* else bpf_map_lookup() and *(fp - 8) = r0 */
2216	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2217	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2218	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2219	BPF_LD_MAP_FD(BPF_REG_1, 0),
2220	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2221	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
2222	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
2223	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
2224	BPF_EXIT_INSN(),
2225	},
2226	.fixup_map_hash_8b = { 13 },
2227	.result = ACCEPT,
2228	.prog_type = BPF_PROG_TYPE_XDP,
2229},
/* Exercises liveness marks vs. state pruning on an fp-8 slot that is only
 * zero-initialized on one branch; without correct live_read marks, pruning
 * would wrongly treat the fall-through branch as equivalent.
 */
2230{
2231	"calls: stack init to zero and pruning",
2232	.insns = {
2233	/* first make allocated_stack 16 byte */
2234	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
2235	/* now fork the execution such that the false branch
2236	 * of JGT insn will be verified second and it skips zero
2237	 * init of fp-8 stack slot. If stack liveness marking
2238	 * is missing live_read marks from call map_lookup
2239	 * processing then pruning will incorrectly assume
2240	 * that fp-8 stack slot was unused in the fall-through
2241	 * branch and will accept the program incorrectly
2242	 */
2243	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
2244	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
2245	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2246	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2247	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2248	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2249	BPF_LD_MAP_FD(BPF_REG_1, 0),
2250	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
2251	BPF_MOV64_IMM(BPF_REG_0, 0),
2252	BPF_EXIT_INSN(),
2253	},
2254	.fixup_map_hash_48b = { 7 },
2255	.errstr_unpriv = "invalid read from stack R2 off -8+0 size 8",
2256	.result_unpriv = REJECT,
2257	/* in privileged mode reads from uninitialized stack locations are permitted */
2258	.result = ACCEPT,
2259},
/* A subprog whose very first insn reads through r1 (ctx) must verify
 * cleanly; unprivileged load is rejected only because bpf-to-bpf calls
 * are not allowed for unprivileged users.
 */
2260{
2261	"calls: ctx read at start of subprog",
2262	.insns = {
2263	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
2264	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
2265	BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
2266	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2267	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
2268	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2269	BPF_EXIT_INSN(),
2270	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
2271	BPF_MOV64_IMM(BPF_REG_0, 0),
2272	BPF_EXIT_INSN(),
2273	},
2274	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2275	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2276	.result_unpriv = REJECT,
2277	.result = ACCEPT,
2278},
/* State pruning across a call boundary must not merge states that differ
 * in the caller's r8; otherwise the bad read via r1 (which is not a
 * pointer after the call) would be wrongly pruned away ("!read_ok").
 */
2279{
2280	"calls: cross frame pruning",
2281	.insns = {
2282	/* r8 = !!random();
2283	 * call pruner()
2284	 * if (r8)
2285	 *     do something bad;
2286	 */
2287	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2288	BPF_MOV64_IMM(BPF_REG_8, 0),
2289	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2290	BPF_MOV64_IMM(BPF_REG_8, 1),
2291	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2292	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2293	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2294	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
2295	BPF_MOV64_IMM(BPF_REG_0, 0),
2296	BPF_EXIT_INSN(),
2297	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2298	BPF_EXIT_INSN(),
2299	},
2300	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2301	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2302	.errstr = "!read_ok",
2303	.result = REJECT,
2304},
/* Variant of "cross frame pruning" that additionally requires liveness
 * marks on r8 to propagate across the call; the guarded bad read via r2
 * must still be detected ("!read_ok").
 */
2305{
2306	"calls: cross frame pruning - liveness propagation",
2307	.insns = {
2308	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2309	BPF_MOV64_IMM(BPF_REG_8, 0),
2310	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2311	BPF_MOV64_IMM(BPF_REG_8, 1),
2312	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2313	BPF_MOV64_IMM(BPF_REG_9, 0),
2314	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2315	BPF_MOV64_IMM(BPF_REG_9, 1),
2316	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2317	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2318	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2319	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
2320	BPF_MOV64_IMM(BPF_REG_0, 0),
2321	BPF_EXIT_INSN(),
2322	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2323	BPF_EXIT_INSN(),
2324	},
2325	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2326	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2327	.errstr = "!read_ok",
2328	.result = REJECT,
2329},
/* Make sure that verifier.c:states_equal() considers IDs from all
 * frames when building 'idmap' for check_ids().
 */
{
	"calls: check_ids() across call boundary",
	.insns = {
	/* Function main() */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	/* fp[-24] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1,
		      0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -24),
	/* fp[-32] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1,
		      0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -32),
	/* call foo(&fp[-24], &fp[-32]) ; both arguments have IDs in the current
	 *                              ; stack frame
	 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -24),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
	BPF_CALL_REL(2),
	/* exit 0 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* Function foo()
	 *
	 * r9 = &frame[0].fp[-24]  ; save arguments in the callee saved registers,
	 * r8 = &frame[0].fp[-32]  ; arguments are pointers to pointers to map value
	 */
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_2),
	/* r7 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	/* r6 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* if r6 > r7 goto +1      ; no new information about the state is derived from
	 *                         ; this check, thus produced verifier states differ
	 *                         ; only in 'insn_idx'
	 * r9 = r8
	 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
	/* r9 = *r9                ; verifier get's to this point via two paths:
	 *                         ; (I) one including r9 = r8, verified first;
	 *                         ; (II) one excluding r9 = r8, verified next.
	 *                         ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id.
	 *                         ; Suppose that checkpoint is created here via path (I).
	 *                         ; When verifying via (II) the r9.id must be compared against
	 *                         ; frame[0].fp[-24].id, otherwise (I) and (II) would be
	 *                         ; incorrectly deemed equivalent.
	 * if r9 == 0 goto <exit>
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1),
	/* r8 = *r8                ; read map value via r8, this is not safe
	 * r0 = *r8                ; because r8 might be not equal to r9.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_8, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0),
	/* exit 0 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	/* force checkpoint creation at every insn to hit the pruning path */
	.flags = BPF_F_TEST_STATE_FREQ,
	.fixup_map_hash_8b = { 3, 9 },
	.result = REJECT,
	.errstr = "R8 invalid mem access 'map_value_or_null'",
	.result_unpriv = REJECT,
	.errstr_unpriv = "",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
/* Passing the same ringbuf reservation (one ref_obj_id) as two separate
 * arguments must be rejected: the helper could release the reference
 * twice via either argument.
 */
2412{
2413	"calls: several args with ref_obj_id",
2414	.insns = {
2415	/* Reserve at least sizeof(struct iphdr) bytes in the ring buffer.
2416	 * With a smaller size, the verifier would reject the call to
2417	 * bpf_tcp_raw_gen_syncookie_ipv4 before we can reach the
2418	 * ref_obj_id error.
2419	 */
2420	BPF_MOV64_IMM(BPF_REG_2, 20),
2421	BPF_MOV64_IMM(BPF_REG_3, 0),
2422	BPF_LD_MAP_FD(BPF_REG_1, 0),
2423	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
2424	/* if r0 == 0 goto <exit> */
2425	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
2426	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2427	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2428	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tcp_raw_gen_syncookie_ipv4),
2429	BPF_EXIT_INSN(),
2430	},
2431	.fixup_map_ringbuf = { 2 },
2432	.result = REJECT,
2433	.errstr = "more than one arg with ref_obj_id",
2434	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2435},