/* BPF verifier selftests: "calls" test cases.
 * From the Linux kernel source tree
 * (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git).
 * Tags: kernel, os, linux.
 */
1{
2 "calls: invalid kfunc call not eliminated",
3 .insns = {
4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
5 BPF_MOV64_IMM(BPF_REG_0, 1),
6 BPF_EXIT_INSN(),
7 },
8 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9 .result = REJECT,
10 .errstr = "invalid kernel function call not eliminated in verifier pass",
11},
12{
13 "calls: invalid kfunc call unreachable",
14 .insns = {
15 BPF_MOV64_IMM(BPF_REG_0, 1),
16 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
17 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
18 BPF_MOV64_IMM(BPF_REG_0, 1),
19 BPF_EXIT_INSN(),
20 },
21 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
22 .result = ACCEPT,
23},
24{
25 "calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
26 .insns = {
27 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
28 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
29 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
30 BPF_EXIT_INSN(),
31 },
32 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
33 .result = REJECT,
34 .errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
35 .fixup_kfunc_btf_id = {
36 { "bpf_kfunc_call_test_fail1", 2 },
37 },
38},
39{
40 "calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
41 .insns = {
42 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
43 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
44 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
45 BPF_EXIT_INSN(),
46 },
47 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
48 .result = REJECT,
49 .errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
50 .fixup_kfunc_btf_id = {
51 { "bpf_kfunc_call_test_fail2", 2 },
52 },
53},
54{
55 "calls: invalid kfunc call: ptr_to_mem to struct with FAM",
56 .insns = {
57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
58 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
59 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
60 BPF_EXIT_INSN(),
61 },
62 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
63 .result = REJECT,
64 .errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
65 .fixup_kfunc_btf_id = {
66 { "bpf_kfunc_call_test_fail3", 2 },
67 },
68},
69{
70 "calls: invalid kfunc call: reg->type != PTR_TO_CTX",
71 .insns = {
72 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
73 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
74 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
75 BPF_EXIT_INSN(),
76 },
77 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
78 .result = REJECT,
79 .errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc",
80 .fixup_kfunc_btf_id = {
81 { "bpf_kfunc_call_test_pass_ctx", 2 },
82 },
83},
84{
85 "calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
86 .insns = {
87 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
89 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
90 BPF_EXIT_INSN(),
91 },
92 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
93 .result = REJECT,
94 .errstr = "arg#0 pointer type UNKNOWN must point to scalar",
95 .fixup_kfunc_btf_id = {
96 { "bpf_kfunc_call_test_mem_len_fail1", 2 },
97 },
98},
99{
100 "calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
101 .insns = {
102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
104 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
108 BPF_EXIT_INSN(),
109 },
110 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
111 .result = REJECT,
112 .errstr = "Possibly NULL pointer passed to trusted arg0",
113 .fixup_kfunc_btf_id = {
114 { "bpf_kfunc_call_test_acquire", 3 },
115 { "bpf_kfunc_call_test_release", 5 },
116 },
117},
118{
119 "calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
120 .insns = {
121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
123 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
125 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
126 BPF_EXIT_INSN(),
127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
130 BPF_MOV64_IMM(BPF_REG_0, 0),
131 BPF_EXIT_INSN(),
132 },
133 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
134 .result = REJECT,
135 .errstr = "R1 must have zero offset when passed to release func",
136 .fixup_kfunc_btf_id = {
137 { "bpf_kfunc_call_test_acquire", 3 },
138 { "bpf_kfunc_call_memb_release", 8 },
139 },
140},
141{
142 "calls: invalid kfunc call: don't match first member type when passed to release kfunc",
143 .insns = {
144 BPF_MOV64_IMM(BPF_REG_0, 0),
145 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
146 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
147 BPF_EXIT_INSN(),
148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
150 BPF_MOV64_IMM(BPF_REG_0, 0),
151 BPF_EXIT_INSN(),
152 },
153 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
154 .result = REJECT,
155 .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
156 .fixup_kfunc_btf_id = {
157 { "bpf_kfunc_call_memb_acquire", 1 },
158 { "bpf_kfunc_call_memb1_release", 5 },
159 },
160},
161{
162 "calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
163 .insns = {
164 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
166 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
168 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
169 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
170 BPF_EXIT_INSN(),
171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
174 BPF_MOV64_IMM(BPF_REG_0, 0),
175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
176 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
177 BPF_MOV64_IMM(BPF_REG_0, 0),
178 BPF_EXIT_INSN(),
179 },
180 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
181 .fixup_kfunc_btf_id = {
182 { "bpf_kfunc_call_test_acquire", 3 },
183 { "bpf_kfunc_call_test_offset", 9 },
184 { "bpf_kfunc_call_test_release", 12 },
185 },
186 .result_unpriv = REJECT,
187 .result = REJECT,
188 .errstr = "ptr R1 off=-4 disallowed",
189},
190{
191 "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
192 .insns = {
193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
195 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
197 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
198 BPF_EXIT_INSN(),
199 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
200 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
201 BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
202 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
203 BPF_MOV64_IMM(BPF_REG_0, 0),
204 BPF_EXIT_INSN(),
205 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
207 BPF_MOV64_IMM(BPF_REG_0, 0),
208 BPF_EXIT_INSN(),
209 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
211 BPF_MOV64_IMM(BPF_REG_0, 0),
212 BPF_EXIT_INSN(),
213 },
214 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
215 .fixup_kfunc_btf_id = {
216 { "bpf_kfunc_call_test_acquire", 3 },
217 { "bpf_kfunc_call_test_release", 9 },
218 { "bpf_kfunc_call_test_release", 13 },
219 { "bpf_kfunc_call_test_release", 17 },
220 },
221 .result_unpriv = REJECT,
222 .result = REJECT,
223 .errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
224},
225{
226 "calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
227 .insns = {
228 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
230 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
231 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
232 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
233 BPF_EXIT_INSN(),
234 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
237 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16),
238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
239 BPF_MOV64_IMM(BPF_REG_0, 0),
240 BPF_EXIT_INSN(),
241 },
242 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
243 .fixup_kfunc_btf_id = {
244 { "bpf_kfunc_call_test_acquire", 3 },
245 { "bpf_kfunc_call_test_ref", 8 },
246 { "bpf_kfunc_call_test_ref", 10 },
247 },
248 .result_unpriv = REJECT,
249 .result = REJECT,
250 .errstr = "R1 must be",
251},
252{
253 "calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
254 .insns = {
255 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
257 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
258 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
259 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
260 BPF_EXIT_INSN(),
261 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
262 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
266 BPF_MOV64_IMM(BPF_REG_0, 0),
267 BPF_EXIT_INSN(),
268 },
269 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
270 .fixup_kfunc_btf_id = {
271 { "bpf_kfunc_call_test_acquire", 3 },
272 { "bpf_kfunc_call_test_ref", 8 },
273 { "bpf_kfunc_call_test_release", 10 },
274 },
275 .result_unpriv = REJECT,
276 .result = ACCEPT,
277},
278{
279 "calls: basic sanity",
280 .insns = {
281 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
282 BPF_MOV64_IMM(BPF_REG_0, 1),
283 BPF_EXIT_INSN(),
284 BPF_MOV64_IMM(BPF_REG_0, 2),
285 BPF_EXIT_INSN(),
286 },
287 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
288 .result = ACCEPT,
289},
290{
291 "calls: not on unprivileged",
292 .insns = {
293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
294 BPF_MOV64_IMM(BPF_REG_0, 1),
295 BPF_EXIT_INSN(),
296 BPF_MOV64_IMM(BPF_REG_0, 2),
297 BPF_EXIT_INSN(),
298 },
299 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
300 .result_unpriv = REJECT,
301 .result = ACCEPT,
302 .retval = 1,
303},
304{
305 "calls: div by 0 in subprog",
306 .insns = {
307 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
308 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
309 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
310 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
311 offsetof(struct __sk_buff, data_end)),
312 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
314 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
315 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
316 BPF_MOV64_IMM(BPF_REG_0, 1),
317 BPF_EXIT_INSN(),
318 BPF_MOV32_IMM(BPF_REG_2, 0),
319 BPF_MOV32_IMM(BPF_REG_3, 1),
320 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
321 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
322 offsetof(struct __sk_buff, data)),
323 BPF_EXIT_INSN(),
324 },
325 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
326 .result = ACCEPT,
327 .retval = 1,
328},
329{
330 "calls: multiple ret types in subprog 1",
331 .insns = {
332 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
334 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
335 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
336 offsetof(struct __sk_buff, data_end)),
337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
339 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
340 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
341 BPF_MOV64_IMM(BPF_REG_0, 1),
342 BPF_EXIT_INSN(),
343 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
344 offsetof(struct __sk_buff, data)),
345 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
346 BPF_MOV32_IMM(BPF_REG_0, 42),
347 BPF_EXIT_INSN(),
348 },
349 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
350 .result = REJECT,
351 .errstr = "R0 invalid mem access 'scalar'",
352},
353{
354 "calls: multiple ret types in subprog 2",
355 .insns = {
356 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
358 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
359 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
360 offsetof(struct __sk_buff, data_end)),
361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
363 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
364 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
365 BPF_MOV64_IMM(BPF_REG_0, 1),
366 BPF_EXIT_INSN(),
367 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
368 offsetof(struct __sk_buff, data)),
369 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
370 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
371 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
372 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
374 BPF_LD_MAP_FD(BPF_REG_1, 0),
375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
376 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
377 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
378 offsetof(struct __sk_buff, data)),
379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
380 BPF_EXIT_INSN(),
381 },
382 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
383 .fixup_map_hash_8b = { 16 },
384 .result = REJECT,
385 .errstr = "R0 min value is outside of the allowed memory range",
386},
387{
388 "calls: overlapping caller/callee",
389 .insns = {
390 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
391 BPF_MOV64_IMM(BPF_REG_0, 1),
392 BPF_EXIT_INSN(),
393 },
394 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
395 .errstr = "last insn is not an exit or jmp",
396 .result = REJECT,
397},
398{
399 "calls: wrong recursive calls",
400 .insns = {
401 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
402 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
405 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
406 BPF_MOV64_IMM(BPF_REG_0, 1),
407 BPF_EXIT_INSN(),
408 },
409 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
410 .errstr = "jump out of range",
411 .result = REJECT,
412},
413{
414 "calls: wrong src reg",
415 .insns = {
416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
417 BPF_MOV64_IMM(BPF_REG_0, 1),
418 BPF_EXIT_INSN(),
419 },
420 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
421 .errstr = "BPF_CALL uses reserved fields",
422 .result = REJECT,
423},
424{
425 "calls: wrong off value",
426 .insns = {
427 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
428 BPF_MOV64_IMM(BPF_REG_0, 1),
429 BPF_EXIT_INSN(),
430 BPF_MOV64_IMM(BPF_REG_0, 2),
431 BPF_EXIT_INSN(),
432 },
433 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
434 .errstr = "BPF_CALL uses reserved fields",
435 .result = REJECT,
436},
437{
438 "calls: jump back loop",
439 .insns = {
440 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
441 BPF_MOV64_IMM(BPF_REG_0, 1),
442 BPF_EXIT_INSN(),
443 },
444 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
445 .errstr = "the call stack of 9 frames is too deep",
446 .result = REJECT,
447},
448{
449 "calls: conditional call",
450 .insns = {
451 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
452 offsetof(struct __sk_buff, mark)),
453 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
455 BPF_MOV64_IMM(BPF_REG_0, 1),
456 BPF_EXIT_INSN(),
457 BPF_MOV64_IMM(BPF_REG_0, 2),
458 BPF_EXIT_INSN(),
459 },
460 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
461 .errstr = "jump out of range",
462 .result = REJECT,
463},
464{
465 "calls: conditional call 2",
466 .insns = {
467 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
468 offsetof(struct __sk_buff, mark)),
469 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
470 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
471 BPF_MOV64_IMM(BPF_REG_0, 1),
472 BPF_EXIT_INSN(),
473 BPF_MOV64_IMM(BPF_REG_0, 2),
474 BPF_EXIT_INSN(),
475 BPF_MOV64_IMM(BPF_REG_0, 3),
476 BPF_EXIT_INSN(),
477 },
478 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
479 .result = ACCEPT,
480},
481{
482 "calls: conditional call 3",
483 .insns = {
484 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
485 offsetof(struct __sk_buff, mark)),
486 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
487 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
488 BPF_MOV64_IMM(BPF_REG_0, 1),
489 BPF_EXIT_INSN(),
490 BPF_MOV64_IMM(BPF_REG_0, 1),
491 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
492 BPF_MOV64_IMM(BPF_REG_0, 3),
493 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
494 },
495 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
496 .errstr_unpriv = "back-edge from insn",
497 .result_unpriv = REJECT,
498 .result = ACCEPT,
499 .retval = 1,
500},
501{
502 "calls: conditional call 4",
503 .insns = {
504 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
505 offsetof(struct __sk_buff, mark)),
506 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
508 BPF_MOV64_IMM(BPF_REG_0, 1),
509 BPF_EXIT_INSN(),
510 BPF_MOV64_IMM(BPF_REG_0, 1),
511 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
512 BPF_MOV64_IMM(BPF_REG_0, 3),
513 BPF_EXIT_INSN(),
514 },
515 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
516 .result = ACCEPT,
517},
518{
519 "calls: conditional call 5",
520 .insns = {
521 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
522 offsetof(struct __sk_buff, mark)),
523 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
524 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
525 BPF_MOV64_IMM(BPF_REG_0, 1),
526 BPF_EXIT_INSN(),
527 BPF_MOV64_IMM(BPF_REG_0, 1),
528 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
529 BPF_MOV64_IMM(BPF_REG_0, 3),
530 BPF_EXIT_INSN(),
531 },
532 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
533 .result = ACCEPT,
534 .retval = 1,
535},
536{
537 "calls: conditional call 6",
538 .insns = {
539 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
540 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
541 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
542 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
543 BPF_EXIT_INSN(),
544 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
545 offsetof(struct __sk_buff, mark)),
546 BPF_EXIT_INSN(),
547 },
548 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
549 .errstr = "infinite loop detected",
550 .result = REJECT,
551},
552{
553 "calls: using r0 returned by callee",
554 .insns = {
555 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
556 BPF_EXIT_INSN(),
557 BPF_MOV64_IMM(BPF_REG_0, 2),
558 BPF_EXIT_INSN(),
559 },
560 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
561 .result = ACCEPT,
562},
563{
564 "calls: using uninit r0 from callee",
565 .insns = {
566 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
567 BPF_EXIT_INSN(),
568 BPF_EXIT_INSN(),
569 },
570 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
571 .errstr = "!read_ok",
572 .result = REJECT,
573},
574{
575 "calls: callee is using r1",
576 .insns = {
577 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
578 BPF_EXIT_INSN(),
579 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
580 offsetof(struct __sk_buff, len)),
581 BPF_EXIT_INSN(),
582 },
583 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
584 .result = ACCEPT,
585 .retval = TEST_DATA_LEN,
586},
587{
588 "calls: callee using args1",
589 .insns = {
590 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
591 BPF_EXIT_INSN(),
592 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
593 BPF_EXIT_INSN(),
594 },
595 .errstr_unpriv = "allowed for",
596 .result_unpriv = REJECT,
597 .result = ACCEPT,
598 .retval = POINTER_VALUE,
599},
600{
601 "calls: callee using wrong args2",
602 .insns = {
603 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
604 BPF_EXIT_INSN(),
605 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
606 BPF_EXIT_INSN(),
607 },
608 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
609 .errstr = "R2 !read_ok",
610 .result = REJECT,
611},
612{
613 "calls: callee using two args",
614 .insns = {
615 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
616 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
617 offsetof(struct __sk_buff, len)),
618 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
619 offsetof(struct __sk_buff, len)),
620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
621 BPF_EXIT_INSN(),
622 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
623 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
624 BPF_EXIT_INSN(),
625 },
626 .errstr_unpriv = "allowed for",
627 .result_unpriv = REJECT,
628 .result = ACCEPT,
629 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
630},
631{
632 "calls: callee changing pkt pointers",
633 .insns = {
634 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
635 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
636 offsetof(struct xdp_md, data_end)),
637 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
639 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
640 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
641 /* clear_all_pkt_pointers() has to walk all frames
642 * to make sure that pkt pointers in the caller
643 * are cleared when callee is calling a helper that
644 * adjusts packet size
645 */
646 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
647 BPF_MOV32_IMM(BPF_REG_0, 0),
648 BPF_EXIT_INSN(),
649 BPF_MOV64_IMM(BPF_REG_2, 0),
650 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
651 BPF_EXIT_INSN(),
652 },
653 .result = REJECT,
654 .errstr = "R6 invalid mem access 'scalar'",
655 .prog_type = BPF_PROG_TYPE_XDP,
656 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
657},
658{
659 "calls: ptr null check in subprog",
660 .insns = {
661 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
662 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
664 BPF_LD_MAP_FD(BPF_REG_1, 0),
665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
666 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
667 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
668 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
669 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
670 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
671 BPF_EXIT_INSN(),
672 BPF_MOV64_IMM(BPF_REG_0, 0),
673 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
674 BPF_MOV64_IMM(BPF_REG_0, 1),
675 BPF_EXIT_INSN(),
676 },
677 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
678 .fixup_map_hash_48b = { 3 },
679 .result_unpriv = REJECT,
680 .result = ACCEPT,
681 .retval = 0,
682},
683{
684 "calls: two calls with args",
685 .insns = {
686 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
687 BPF_EXIT_INSN(),
688 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
689 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
690 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
693 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
694 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
695 BPF_EXIT_INSN(),
696 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
697 offsetof(struct __sk_buff, len)),
698 BPF_EXIT_INSN(),
699 },
700 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
701 .result = ACCEPT,
702 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
703},
704{
705 "calls: calls with stack arith",
706 .insns = {
707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
709 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
710 BPF_EXIT_INSN(),
711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
713 BPF_EXIT_INSN(),
714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
715 BPF_MOV64_IMM(BPF_REG_0, 42),
716 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
717 BPF_EXIT_INSN(),
718 },
719 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
720 .result = ACCEPT,
721 .retval = 42,
722},
723{
724 "calls: calls with misaligned stack access",
725 .insns = {
726 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
729 BPF_EXIT_INSN(),
730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
731 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
732 BPF_EXIT_INSN(),
733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
734 BPF_MOV64_IMM(BPF_REG_0, 42),
735 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
736 BPF_EXIT_INSN(),
737 },
738 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
739 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
740 .errstr = "misaligned stack access",
741 .result = REJECT,
742},
743{
744 "calls: calls control flow, jump test",
745 .insns = {
746 BPF_MOV64_IMM(BPF_REG_0, 42),
747 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
748 BPF_MOV64_IMM(BPF_REG_0, 43),
749 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
750 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
751 BPF_EXIT_INSN(),
752 },
753 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
754 .result = ACCEPT,
755 .retval = 43,
756},
757{
758 "calls: calls control flow, jump test 2",
759 .insns = {
760 BPF_MOV64_IMM(BPF_REG_0, 42),
761 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
762 BPF_MOV64_IMM(BPF_REG_0, 43),
763 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
765 BPF_EXIT_INSN(),
766 },
767 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
768 .errstr = "jump out of range from insn 1 to 4",
769 .result = REJECT,
770},
771{
772 "calls: two calls with bad jump",
773 .insns = {
774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
775 BPF_EXIT_INSN(),
776 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
777 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
778 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
779 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
780 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
781 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
782 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
783 BPF_EXIT_INSN(),
784 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
785 offsetof(struct __sk_buff, len)),
786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
787 BPF_EXIT_INSN(),
788 },
789 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
790 .errstr = "jump out of range from insn 11 to 9",
791 .result = REJECT,
792},
793{
794 "calls: recursive call. test1",
795 .insns = {
796 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
797 BPF_EXIT_INSN(),
798 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
799 BPF_EXIT_INSN(),
800 },
801 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
802 .errstr = "the call stack of 9 frames is too deep",
803 .result = REJECT,
804},
805{
806 "calls: recursive call. test2",
807 .insns = {
808 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
809 BPF_EXIT_INSN(),
810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
811 BPF_EXIT_INSN(),
812 },
813 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
814 .errstr = "the call stack of 9 frames is too deep",
815 .result = REJECT,
816},
817{
818 "calls: unreachable code",
819 .insns = {
820 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
821 BPF_EXIT_INSN(),
822 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
823 BPF_EXIT_INSN(),
824 BPF_MOV64_IMM(BPF_REG_0, 0),
825 BPF_EXIT_INSN(),
826 BPF_MOV64_IMM(BPF_REG_0, 0),
827 BPF_EXIT_INSN(),
828 },
829 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
830 .errstr = "unreachable insn 6",
831 .result = REJECT,
832},
833{
834 "calls: invalid call",
835 .insns = {
836 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
837 BPF_EXIT_INSN(),
838 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
839 BPF_EXIT_INSN(),
840 },
841 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
842 .errstr = "invalid destination",
843 .result = REJECT,
844},
845{
846 "calls: invalid call 2",
847 .insns = {
848 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
849 BPF_EXIT_INSN(),
850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
851 BPF_EXIT_INSN(),
852 },
853 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
854 .errstr = "invalid destination",
855 .result = REJECT,
856},
857{
858 "calls: jumping across function bodies. test1",
859 .insns = {
860 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
861 BPF_MOV64_IMM(BPF_REG_0, 0),
862 BPF_EXIT_INSN(),
863 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
864 BPF_EXIT_INSN(),
865 },
866 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
867 .errstr = "jump out of range",
868 .result = REJECT,
869},
870{
871 "calls: jumping across function bodies. test2",
872 .insns = {
873 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
874 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
875 BPF_MOV64_IMM(BPF_REG_0, 0),
876 BPF_EXIT_INSN(),
877 BPF_EXIT_INSN(),
878 },
879 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
880 .errstr = "jump out of range",
881 .result = REJECT,
882},
883{
884 "calls: call without exit",
885 .insns = {
886 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
887 BPF_EXIT_INSN(),
888 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
889 BPF_EXIT_INSN(),
890 BPF_MOV64_IMM(BPF_REG_0, 0),
891 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
892 },
893 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
894 .errstr = "not an exit",
895 .result = REJECT,
896},
897{
898 "calls: call into middle of ld_imm64",
899 .insns = {
900 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
902 BPF_MOV64_IMM(BPF_REG_0, 0),
903 BPF_EXIT_INSN(),
904 BPF_LD_IMM64(BPF_REG_0, 0),
905 BPF_EXIT_INSN(),
906 },
907 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
908 .errstr = "last insn",
909 .result = REJECT,
910},
911{
912 "calls: call into middle of other call",
913 .insns = {
914 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
915 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
916 BPF_MOV64_IMM(BPF_REG_0, 0),
917 BPF_EXIT_INSN(),
918 BPF_MOV64_IMM(BPF_REG_0, 0),
919 BPF_MOV64_IMM(BPF_REG_0, 0),
920 BPF_EXIT_INSN(),
921 },
922 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
923 .errstr = "last insn",
924 .result = REJECT,
925},
926{
927 "calls: subprog call with ld_abs in main prog",
928 .insns = {
929 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
930 BPF_LD_ABS(BPF_B, 0),
931 BPF_LD_ABS(BPF_H, 0),
932 BPF_LD_ABS(BPF_W, 0),
933 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
934 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
935 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
936 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
937 BPF_LD_ABS(BPF_B, 0),
938 BPF_LD_ABS(BPF_H, 0),
939 BPF_LD_ABS(BPF_W, 0),
940 BPF_EXIT_INSN(),
941 BPF_MOV64_IMM(BPF_REG_2, 1),
942 BPF_MOV64_IMM(BPF_REG_3, 2),
943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
944 BPF_EXIT_INSN(),
945 },
946 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
947 .result = ACCEPT,
948},
949{
950 "calls: two calls with bad fallthrough",
951 .insns = {
952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
953 BPF_EXIT_INSN(),
954 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
956 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
958 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
959 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
960 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
961 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
962 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
963 offsetof(struct __sk_buff, len)),
964 BPF_EXIT_INSN(),
965 },
966 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
967 .errstr = "not an exit",
968 .result = REJECT,
969},
970{
971 "calls: two calls with stack read",
972 .insns = {
973 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
974 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
976 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
977 BPF_EXIT_INSN(),
978 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
980 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
982 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
983 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
984 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
985 BPF_EXIT_INSN(),
986 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
987 BPF_EXIT_INSN(),
988 },
989 .prog_type = BPF_PROG_TYPE_XDP,
990 .result = ACCEPT,
991},
{
	/* ACCEPT: subprog 1 receives &fp[-8] and &fp[-16] of main's frame
	 * and, via subprog 2, both reads from and writes into it.
	 */
	"calls: two calls with stack write",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
	/* write into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* read from stack frame of main prog */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	/* REJECT: both frames touch fp[-300], so the combined stack of
	 * the two frames exceeds the 512-byte limit.
	 */
	"calls: stack overflow using two frames (pre-call access)",
	.insns = {
	/* prog 1 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	/* REJECT: same as the pre-call variant, but the caller touches
	 * its deep stack slot only after the call returns; the combined
	 * stack size must still be accounted.
	 */
	"calls: stack overflow using two frames (post-call access)",
	.insns = {
	/* prog 1 */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),

	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	/* ACCEPT: worst-case combined stack over the three frames stays
	 * under the 512-byte limit (see per-frame sizes below).
	 */
	"calls: stack depth check using three frames. test1",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=256, stack_B=64
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	/* ACCEPT: like test1 but with the big frame moved from A to B;
	 * combined stack still under the limit.
	 */
	"calls: stack depth check using three frames. test2",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=64, stack_B=256
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	/* REJECT: larger per-frame usage plus conditional control flow
	 * pushes the worst-case combined stack over 512.
	 */
	"calls: stack depth check using three frames. test3",
	.insns = {
	/* main */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	/* B */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=64, stack_A=224, stack_B=256
	 * and max(main+A, main+A+B) > 512
	 */
	.errstr = "combined stack",
	.result = REJECT,
},
{
	/* REJECT ("combined stack"): depth accounting must consider the
	 * worst case over all call paths, as sketched in the pseudo-C
	 * below.
	 */
	"calls: stack depth check using three frames. test4",
	/* void main(void) {
	 *	func1(0);
	 *	func1(1);
	 *	func2(1);
	 * }
	 * void func1(int alloc_or_recurse) {
	 *	if (alloc_or_recurse) {
	 *		frame_pointer[-300] = 1;
	 *	} else {
	 *		func2(alloc_or_recurse);
	 *	}
	 * }
	 * void func2(int alloc_or_recurse) {
	 *	if (alloc_or_recurse) {
	 *		frame_pointer[-300] = 1;
	 *	}
	 * }
	 */
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "combined stack",
},
{
	/* REJECT ("call stack"): a linear chain of nine frames
	 * (main -> A -> ... -> H) exceeds the verifier's call
	 * depth limit.
	 */
	"calls: stack depth check using three frames. test5",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	/* REJECT ("call stack"): main passes R1 == 0, so A's deep call
	 * chain (B..H) is never taken at runtime, yet the depth check
	 * still rejects the program.
	 */
	"calls: stack depth check in dead code",
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	/* REJECT ("cannot spill"): the subprog tries to spill R1 (a
	 * pointer into the caller's stack) through that same pointer.
	 */
	"calls: spill into caller stack frame",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog: spill stack pointer into caller's frame */
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot spill",
	.result = REJECT,
},
{
	/* ACCEPT, retval 42: the subprog stores 42 through the caller's
	 * stack pointer; the caller reads it back and returns it.
	 */
	"calls: write into caller stack frame",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	/* subprog: write scalar 42 into caller's fp[-8] */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
	.retval = 42,
},
{
	/* REJECT ("cannot return stack pointer"): the callee returns a
	 * pointer to its own (dead after return) stack frame in R0.
	 */
	"calls: write into callee stack frame",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
	/* subprog: return &fp[-8] of its own frame */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot return stack pointer",
	.result = REJECT,
},
{
	/* ACCEPT: subprog 2 writes into main's frame and exits without
	 * setting R0; main reads fp[-16], never the subprog's R0.
	 */
	"calls: two calls with stack write and void return",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* write into stack frame of main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(), /* void return */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	/* REJECT ("R0 !read_ok"): on the R1 == 0 path the subprog exits
	 * without writing R0, so the caller reads an undefined return
	 * value.
	 */
	"calls: ambiguous return value",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	/* subprog: skips "R0 = 0" when R1 == 0 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
{
	/* ACCEPT: subprog 2 writes map_value_ptr (or NULL) into main's
	 * frame; main null-checks each slot before writing through it.
	 */
	"calls: two calls that return map_value",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),

	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	/* ACCEPT: subprog 2 returns 1 only after storing a non-NULL
	 * map_value_ptr; subprog 1 dereferences the slot only when the
	 * return value is 1.
	 */
	"calls: two calls that return map_value with bool condition",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	/* REJECT: same as the previous test, but the second return-value
	 * check tests for != 0 instead of != 1, so the fp-16 slot is
	 * read on a path where no pointer was stored there.
	 */
	"calls: two calls that return map_value with incorrect bool check",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* the incorrect check: should compare against 1, not 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = REJECT,
	.errstr = "invalid read from stack R7 off=-16 size=8",
},
{
	/* REJECT: the validity-flag pattern itself is fine, but the final
	 * write is 8 bytes at offset 2 into an 8-byte map value.
	 */
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value; off=2 overruns the 8-byte value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* ACCEPT: identical to test1 except both final writes stay at
	 * offset 0, within the 8-byte map value.
	 */
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	/* REJECT: same layout as test1 but with plain jumps instead of
	 * calls; the off=2 8-byte write into an 8-byte map value is
	 * still caught.
	 */
	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0), // 26
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
	BPF_JMP_IMM(BPF_JA, 0, 0, -30),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value; off=2 overruns the 8-byte value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* ACCEPT: the raw ptr_or_null result is stored before the NULL
	 * check, but subprog 2 dereferences each slot only when the
	 * matching validity flag (set on the non-NULL branch) is 1.
	 */
	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	/* REJECT: like test1 but the second guard is inverted (arg4 == 0
	 * instead of == 1), so the deref happens on the lookup-failure
	 * path where the slot holds NULL.
	 */
	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 0 do *arg3 = 0 (inverted check: wrong branch) */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
},
{
	/* ACCEPT: the unchecked pkt_ptr is spilled to the caller's
	 * stack, but it is only loaded back and dereferenced after the
	 * range check against data_end succeeds.
	 */
	"calls: pkt_ptr spill into caller stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* REJECT ("invalid access to packet"): the caller loads the
	 * spilled pkt_ptr unconditionally after the call, including the
	 * path where the subprog's range check failed.
	 */
	"calls: pkt_ptr spill into caller stack 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* Marking is still kept, but not in all cases safe. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* ACCEPT: the subprog returns 1 only on the range-checked path,
	 * and the caller uses the spilled pkt_ptr only when R0 != 0.
	 */
	"calls: pkt_ptr spill into caller stack 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Marking is still kept and safe here. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
1959{
1960 "calls: pkt_ptr spill into caller stack 4",
 /* Like "stack 3", but the callee never reads the spill back itself;
  * the caller relies on the spilled-slot marking being propagated out
  * of the callee and dereferences only when r0 (the r5 flag) is
  * non-zero. Accepted.
  */
1961 .insns = {
1962 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1964 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1965 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1966 /* Check marking propagated. */
1967 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1968 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1969 BPF_EXIT_INSN(),
1970
1971 /* subprog 1 */
1972 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1973 offsetof(struct __sk_buff, data)),
1974 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1975 offsetof(struct __sk_buff, data_end)),
1976 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1978 /* spill unchecked pkt_ptr into stack of caller */
1979 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1980 BPF_MOV64_IMM(BPF_REG_5, 0),
1981 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1982 BPF_MOV64_IMM(BPF_REG_5, 1),
1983 /* don't read back pkt_ptr from stack here */
1984 /* write 4 bytes into packet */
1985 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1986 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1987 BPF_EXIT_INSN(),
1988 },
1989 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1990 .result = ACCEPT,
1991 .retval = 1,
1992 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1993},
1994{
1995 "calls: pkt_ptr spill into caller stack 5",
 /* The caller first spills the ctx pointer (r1) into the slot, then the
  * callee conditionally spills a checked pkt_ptr into the same slot.
  * The same spill/fill instructions would thus be used with different
  * pointer types, which the verifier must reject.
  */
1996 .insns = {
1997 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1999 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
2000 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2001 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2002 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2003 BPF_EXIT_INSN(),
2004
2005 /* subprog 1 */
2006 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2007 offsetof(struct __sk_buff, data)),
2008 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2009 offsetof(struct __sk_buff, data_end)),
2010 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2012 BPF_MOV64_IMM(BPF_REG_5, 0),
2013 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2014 /* spill checked pkt_ptr into stack of caller */
2015 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2016 BPF_MOV64_IMM(BPF_REG_5, 1),
2017 /* don't read back pkt_ptr from stack here */
2018 /* write 4 bytes into packet */
2019 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2020 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2021 BPF_EXIT_INSN(),
2022 },
2023 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2024 .errstr = "same insn cannot be used with different",
2025 .result = REJECT,
2026 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2027},
2028{
2029 "calls: pkt_ptr spill into caller stack 6",
 /* The caller spills data_end into the slot; the callee overwrites it
  * with a checked pkt_ptr only on one path. On the other path the fill
  * yields the non-dereferenceable data_end pointer, so the caller's
  * unconditional load through r4 must be rejected.
  */
2030 .insns = {
2031 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2032 offsetof(struct __sk_buff, data_end)),
2033 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2035 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2036 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2037 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2038 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2039 BPF_EXIT_INSN(),
2040
2041 /* subprog 1 */
2042 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2043 offsetof(struct __sk_buff, data)),
2044 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2045 offsetof(struct __sk_buff, data_end)),
2046 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2048 BPF_MOV64_IMM(BPF_REG_5, 0),
2049 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2050 /* spill checked pkt_ptr into stack of caller */
2051 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2052 BPF_MOV64_IMM(BPF_REG_5, 1),
2053 /* don't read back pkt_ptr from stack here */
2054 /* write 4 bytes into packet */
2055 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2056 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2057 BPF_EXIT_INSN(),
2058 },
2059 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2060 .errstr = "R4 invalid mem access",
2061 .result = REJECT,
2062 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2063},
2064{
2065 "calls: pkt_ptr spill into caller stack 7",
 /* Like "stack 6", but the caller pre-fills the slot with the scalar 0.
  * On the path where the callee skipped the spill, the fill yields a
  * scalar, so dereferencing r4 must be rejected.
  */
2066 .insns = {
2067 BPF_MOV64_IMM(BPF_REG_2, 0),
2068 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2070 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2071 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2072 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2073 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2074 BPF_EXIT_INSN(),
2075
2076 /* subprog 1 */
2077 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2078 offsetof(struct __sk_buff, data)),
2079 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2080 offsetof(struct __sk_buff, data_end)),
2081 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2083 BPF_MOV64_IMM(BPF_REG_5, 0),
2084 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2085 /* spill checked pkt_ptr into stack of caller */
2086 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2087 BPF_MOV64_IMM(BPF_REG_5, 1),
2088 /* don't read back pkt_ptr from stack here */
2089 /* write 4 bytes into packet */
2090 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2091 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2092 BPF_EXIT_INSN(),
2093 },
2094 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2095 .errstr = "R4 invalid mem access",
2096 .result = REJECT,
2097 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2098},
2099{
2100 "calls: pkt_ptr spill into caller stack 8",
 /* The caller range-checks a pkt_ptr itself and spills the checked
  * pointer into the slot before the call; the callee may overwrite it
  * with another checked pkt_ptr. Either way the fill yields a verified
  * packet pointer, so the caller's dereference is accepted.
  */
2101 .insns = {
2102 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2103 offsetof(struct __sk_buff, data)),
2104 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2105 offsetof(struct __sk_buff, data_end)),
2106 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2108 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
2109 BPF_EXIT_INSN(),
2110 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2112 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2114 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2115 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2116 BPF_EXIT_INSN(),
2117
2118 /* subprog 1 */
2119 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2120 offsetof(struct __sk_buff, data)),
2121 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2122 offsetof(struct __sk_buff, data_end)),
2123 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2125 BPF_MOV64_IMM(BPF_REG_5, 0),
2126 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2127 /* spill checked pkt_ptr into stack of caller */
2128 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2129 BPF_MOV64_IMM(BPF_REG_5, 1),
2130 /* don't read back pkt_ptr from stack here */
2131 /* write 4 bytes into packet */
2132 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2133 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2134 BPF_EXIT_INSN(),
2135 },
2136 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2137 .result = ACCEPT,
2138 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2139},
2140{
2141 "calls: pkt_ptr spill into caller stack 9",
 /* Like "stack 8", but the callee's spill happens before its own range
  * check. On the out-of-range path the slot holds an unchecked pkt_ptr,
  * so the caller's unconditional dereference must be rejected.
  */
2142 .insns = {
2143 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2144 offsetof(struct __sk_buff, data)),
2145 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2146 offsetof(struct __sk_buff, data_end)),
2147 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2149 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
2150 BPF_EXIT_INSN(),
2151 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2153 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2154 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2155 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2156 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2157 BPF_EXIT_INSN(),
2158
2159 /* subprog 1 */
2160 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2161 offsetof(struct __sk_buff, data)),
2162 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2163 offsetof(struct __sk_buff, data_end)),
2164 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2166 BPF_MOV64_IMM(BPF_REG_5, 0),
2167 /* spill unchecked pkt_ptr into stack of caller */
2168 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2169 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2170 BPF_MOV64_IMM(BPF_REG_5, 1),
2171 /* don't read back pkt_ptr from stack here */
2172 /* write 4 bytes into packet */
2173 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2174 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2175 BPF_EXIT_INSN(),
2176 },
2177 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2178 .errstr = "invalid access to packet",
2179 .result = REJECT,
2180 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2181},
2182{
2183 "calls: caller stack init to zero or map_value_or_null",
 /* The caller zero-initializes the slot and passes its address to the
  * subprog, which either leaves it untouched or stores the result of
  * bpf_map_lookup_elem() through the passed pointer. The caller
  * NULL-checks the fill before storing, so both outcomes are safe.
  */
2184 .insns = {
2185 BPF_MOV64_IMM(BPF_REG_0, 0),
2186 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2189 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2190 /* fetch map_value_or_null or const_zero from stack */
2191 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2193 /* store into map_value */
2194 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
2195 BPF_EXIT_INSN(),
2196
2197 /* subprog 1 */
2198 /* if (ctx == 0) return; */
2199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
2200 /* else bpf_map_lookup() and *(fp - 8) = r0 */
2201 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2202 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2203 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2204 BPF_LD_MAP_FD(BPF_REG_1, 0),
2205 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
2207 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
2208 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
2209 BPF_EXIT_INSN(),
2210 },
2211 .fixup_map_hash_8b = { 13 },
2212 .result = ACCEPT,
2213 .prog_type = BPF_PROG_TYPE_XDP,
2214},
2215{
2216 "calls: stack init to zero and pruning",
2217 .insns = {
2218 /* first make allocated_stack 16 byte */
2219 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
2220 /* now fork the execution such that the false branch
2221 * of JGT insn will be verified second and it skips zero
2222 * init of fp-8 stack slot. If stack liveness marking
2223 * is missing live_read marks from call map_lookup
2224 * processing then pruning will incorrectly assume
2225 * that fp-8 stack slot was unused in the fall-through
2226 * branch and will accept the program incorrectly
2227 */
2228 BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
2229 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
2230 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2231 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2232 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2234 BPF_LD_MAP_FD(BPF_REG_1, 0),
2235 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
2236 BPF_MOV64_IMM(BPF_REG_0, 0),
2237 BPF_EXIT_INSN(),
2238 },
2239 .fixup_map_hash_48b = { 7 },
2240 .errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
2241 .result_unpriv = REJECT,
2242 /* in privileged mode reads from uninitialized stack locations are permitted */
2243 .result = ACCEPT,
2244},
2245{
2246 "calls: ctx read at start of subprog",
 /* The second subprog dereferences a byte of its first argument at its
  * very first instruction; the verifier must track that r1 holds ctx
  * at subprog entry. Accepted in privileged mode; unprivileged mode
  * rejects programs with bpf-to-bpf calls altogether.
  */
2247 .insns = {
2248 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
2249 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
2250 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
2251 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2252 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
2253 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2254 BPF_EXIT_INSN(),
2255 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
2256 BPF_MOV64_IMM(BPF_REG_0, 0),
2257 BPF_EXIT_INSN(),
2258 },
2259 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2260 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2261 .result_unpriv = REJECT,
2262 .result = ACCEPT,
2263},
2264{
2265 "calls: cross frame pruning",
 /* r8 depends on get_prandom_u32(); after the call, state pruning must
  * keep distinguishing the r8 == 0 and r8 == 1 paths across the frame
  * boundary. The r8 != 1 path reaches a load through r1, which holds a
  * scalar there, so the verifier must reject with "!read_ok".
  */
2266 .insns = {
2267 /* r8 = !!random();
2268 * call pruner()
2269 * if (r8)
2270 * do something bad;
2271 */
2272 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2273 BPF_MOV64_IMM(BPF_REG_8, 0),
2274 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2275 BPF_MOV64_IMM(BPF_REG_8, 1),
2276 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2277 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2278 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2279 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
2280 BPF_MOV64_IMM(BPF_REG_0, 0),
2281 BPF_EXIT_INSN(),
2282 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2283 BPF_EXIT_INSN(),
2284 },
2285 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2286 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2287 .errstr = "!read_ok",
2288 .result = REJECT,
2289},
2290{
2291 "calls: cross frame pruning - liveness propagation",
 /* Variant of "cross frame pruning" that exercises liveness propagation
  * across the call: the conditional path loads through r2, which is
  * never initialized, so the verifier must reject with "!read_ok"
  * instead of pruning the state away.
  */
2292 .insns = {
2293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2294 BPF_MOV64_IMM(BPF_REG_8, 0),
2295 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2296 BPF_MOV64_IMM(BPF_REG_8, 1),
2297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2298 BPF_MOV64_IMM(BPF_REG_9, 0),
2299 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2300 BPF_MOV64_IMM(BPF_REG_9, 1),
2301 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2302 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2303 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2304 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
2305 BPF_MOV64_IMM(BPF_REG_0, 0),
2306 BPF_EXIT_INSN(),
2307 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2308 BPF_EXIT_INSN(),
2309 },
2310 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2311 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2312 .errstr = "!read_ok",
2313 .result = REJECT,
2314},
2315/* Make sure that verifier.c:states_equal() considers IDs from all
2316 * frames when building 'idmap' for check_ids().
2317 */
2318{
2319 "calls: check_ids() across call boundary",
2320 .insns = {
2321 /* Function main() */
2322 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2323 /* fp[-24] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
2324 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2326 BPF_LD_MAP_FD(BPF_REG_1,
2327 0),
2328 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
2329 BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -24),
2330 /* fp[-32] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
2331 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2333 BPF_LD_MAP_FD(BPF_REG_1,
2334 0),
2335 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
2336 BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -32),
2337 /* call foo(&fp[-24], &fp[-32]) ; both arguments have IDs in the current
2338 * ; stack frame
2339 */
2340 BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
2341 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -24),
2342 BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
2343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
2344 BPF_CALL_REL(2),
2345 /* exit 0 */
2346 BPF_MOV64_IMM(BPF_REG_0, 0),
2347 BPF_EXIT_INSN(),
2348 /* Function foo()
2349 *
2350 * r9 = &frame[0].fp[-24] ; save arguments in the callee saved registers,
2351 * r8 = &frame[0].fp[-32] ; arguments are pointers to pointers to map value
2352 */
2353 BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
2354 BPF_MOV64_REG(BPF_REG_8, BPF_REG_2),
2355 /* r7 = ktime_get_ns() */
2356 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
2357 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
2358 /* r6 = ktime_get_ns() */
2359 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
2360 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
2361 /* if r6 > r7 goto +1 ; no new information about the state is derived from
2362 * ; this check, thus produced verifier states differ
2363 * ; only in 'insn_idx'
2364 * r9 = r8
2365 */
2366 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
2367 BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
2368 /* r9 = *r9 ; verifier gets to this point via two paths:
2369 * ; (I) one including r9 = r8, verified first;
2370 * ; (II) one excluding r9 = r8, verified next.
2371 * ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id.
2372 * ; Suppose that checkpoint is created here via path (I).
2373 * ; When verifying via (II) the r9.id must be compared against
2374 * ; frame[0].fp[-24].id, otherwise (I) and (II) would be
2375 * ; incorrectly deemed equivalent.
2376 * if r9 == 0 goto <exit>
2377 */
2378 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_9, 0),
2379 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1),
2380 /* r8 = *r8 ; read map value via r8, this is not safe
2381 * r0 = *r8 ; because r8 might be not equal to r9.
2382 */
2383 BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_8, 0),
2384 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0),
2385 /* exit 0 */
2386 BPF_MOV64_IMM(BPF_REG_0, 0),
2387 BPF_EXIT_INSN(),
2388 },
2389 .flags = BPF_F_TEST_STATE_FREQ,
2390 .fixup_map_hash_8b = { 3, 9 },
2391 .result = REJECT,
2392 .errstr = "R8 invalid mem access 'map_value_or_null'",
2393 .result_unpriv = REJECT,
2394 .errstr_unpriv = "",
2395 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
2396},