Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
/* BPF verifier selftests exercising BPF-to-BPF and kernel-function (kfunc)
 * calls.  Each brace-enclosed entry is one test case: a raw instruction
 * array plus the expected verifier verdict (.result) and, for rejections,
 * the expected error substring (.errstr).
 * NOTE(review): the leading digit on every line looks like a line-number
 * artifact of the dump this chunk was extracted from, not original source.
 */
/* A reachable BPF_PSEUDO_KFUNC_CALL with imm=0 must be rejected with the
 * "not eliminated in verifier pass" error.
 */
1{
2 "calls: invalid kfunc call not eliminated",
3 .insns = {
4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
5 BPF_MOV64_IMM(BPF_REG_0, 1),
6 BPF_EXIT_INSN(),
7 },
8 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9 .result = REJECT,
10 .errstr = "invalid kernel function call not eliminated in verifier pass",
11},
/* The same invalid kfunc call is dead code behind an always-taken branch
 * (r0 = 1, so "r0 > 0" always jumps over it), hence the program is accepted.
 */
12{
13 "calls: invalid kfunc call unreachable",
14 .insns = {
15 BPF_MOV64_IMM(BPF_REG_0, 1),
16 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
17 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
18 BPF_MOV64_IMM(BPF_REG_0, 1),
19 BPF_EXIT_INSN(),
20 },
21 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
22 .result = ACCEPT,
23},
/* Minimal bpf-to-bpf call (src_reg=1, i.e. a pseudo call) targeting the
 * subprog two insns ahead; both caller and callee exit cleanly.
 */
24{
25 "calls: basic sanity",
26 .insns = {
27 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
28 BPF_MOV64_IMM(BPF_REG_0, 1),
29 BPF_EXIT_INSN(),
30 BPF_MOV64_IMM(BPF_REG_0, 2),
31 BPF_EXIT_INSN(),
32 },
33 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
34 .result = ACCEPT,
35},
/* Same shape of program, but also loaded unprivileged: bpf-to-bpf calls
 * are refused for unprivileged users (.errstr_unpriv), while the
 * privileged load is accepted and returns 1.
 */
36{
37 "calls: not on unpriviledged",
38 .insns = {
39 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
40 BPF_MOV64_IMM(BPF_REG_0, 1),
41 BPF_EXIT_INSN(),
42 BPF_MOV64_IMM(BPF_REG_0, 2),
43 BPF_EXIT_INSN(),
44 },
45 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
46 .result_unpriv = REJECT,
47 .result = ACCEPT,
48 .retval = 1,
49},
/* Callee performs a 32-bit divide by zero (r3 /= r2 with r2 = 0) before
 * returning skb->data; the program must still verify and run (retval 1),
 * exercising the runtime's defined divide-by-zero handling inside a subprog.
 */
50{
51 "calls: div by 0 in subprog",
52 .insns = {
53 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
54 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
55 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
56 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
57 offsetof(struct __sk_buff, data_end)),
58 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
59 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
60 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
61 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
62 BPF_MOV64_IMM(BPF_REG_0, 1),
63 BPF_EXIT_INSN(),
64 BPF_MOV32_IMM(BPF_REG_2, 0),
65 BPF_MOV32_IMM(BPF_REG_3, 1),
66 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
67 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
68 offsetof(struct __sk_buff, data)),
69 BPF_EXIT_INSN(),
70 },
71 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
72 .result = ACCEPT,
73 .retval = 1,
74},
/* Callee may return either skb->data (a packet pointer) or the scalar 42
 * depending on a branch; the caller then dereferences r0, so the verifier
 * must reject the mixed return type ("R0 invalid mem access 'inv'").
 */
75{
76 "calls: multiple ret types in subprog 1",
77 .insns = {
78 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
79 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
80 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
81 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
82 offsetof(struct __sk_buff, data_end)),
83 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
84 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
85 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
86 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
87 BPF_MOV64_IMM(BPF_REG_0, 1),
88 BPF_EXIT_INSN(),
89 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
90 offsetof(struct __sk_buff, data)),
91 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
92 BPF_MOV32_IMM(BPF_REG_0, 42),
93 BPF_EXIT_INSN(),
94 },
95 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
96 .result = REJECT,
97 .errstr = "R0 invalid mem access 'inv'",
98},
/* Callee may return either skb->data or a map-lookup result (map value
 * pointer or NULL); adding 64 to that ambiguous r0 and returning must be
 * rejected ("R0 min value is outside of the allowed memory range").
 * fixup_map_hash_8b patches the map fd into the BPF_LD_MAP_FD at insn 16.
 */
99{
100 "calls: multiple ret types in subprog 2",
101 .insns = {
102 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
103 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
104 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
105 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
106 offsetof(struct __sk_buff, data_end)),
107 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
109 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
110 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
111 BPF_MOV64_IMM(BPF_REG_0, 1),
112 BPF_EXIT_INSN(),
113 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
114 offsetof(struct __sk_buff, data)),
115 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
116 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
117 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
118 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
120 BPF_LD_MAP_FD(BPF_REG_1, 0),
121 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
122 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
123 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
124 offsetof(struct __sk_buff, data)),
125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
126 BPF_EXIT_INSN(),
127 },
128 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
129 .fixup_map_hash_8b = { 16 },
130 .result = REJECT,
131 .errstr = "R0 min value is outside of the allowed memory range",
132},
/* Call with offset 0 targets the call insn's own fallthrough, making
 * caller and callee bodies overlap; rejected because the "subprog" has no
 * proper terminator ("last insn is not an exit or jmp").
 */
133{
134 "calls: overlapping caller/callee",
135 .insns = {
136 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
137 BPF_MOV64_IMM(BPF_REG_0, 1),
138 BPF_EXIT_INSN(),
139 },
140 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
141 .errstr = "last insn is not an exit or jmp",
142 .result = REJECT,
143},
/* Backwards pseudo calls (offset -2) from insns only reachable via the
 * leading JA jumps; rejected as "jump out of range".
 */
144{
145 "calls: wrong recursive calls",
146 .insns = {
147 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
148 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
151 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
152 BPF_MOV64_IMM(BPF_REG_0, 1),
153 BPF_EXIT_INSN(),
154 },
155 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
156 .errstr = "jump out of range",
157 .result = REJECT,
158},
/* src_reg=3 is neither 0 (helper call) nor a valid pseudo-call encoding,
 * so the reserved-field check fires.
 */
159{
160 "calls: wrong src reg",
161 .insns = {
162 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
163 BPF_MOV64_IMM(BPF_REG_0, 1),
164 BPF_EXIT_INSN(),
165 },
166 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
167 .errstr = "BPF_CALL uses reserved fields",
168 .result = REJECT,
169},
/* A non-zero off field (-1) in a BPF_CALL insn is reserved and rejected. */
170{
171 "calls: wrong off value",
172 .insns = {
173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
174 BPF_MOV64_IMM(BPF_REG_0, 1),
175 BPF_EXIT_INSN(),
176 BPF_MOV64_IMM(BPF_REG_0, 2),
177 BPF_EXIT_INSN(),
178 },
179 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
180 .errstr = "BPF_CALL uses reserved fields",
181 .result = REJECT,
182},
/* A pseudo call with offset -1 targets itself, creating a trivial loop
 * that the verifier reports as a back-edge from insn 0 to 0.
 */
183{
184 "calls: jump back loop",
185 .insns = {
186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
187 BPF_MOV64_IMM(BPF_REG_0, 1),
188 BPF_EXIT_INSN(),
189 },
190 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
191 .errstr = "back-edge from insn 0 to 0",
192 .result = REJECT,
193},
/* A conditional branch skips over the call target's body so that the
 * callee's exit would fall outside its own function; rejected as
 * "jump out of range".
 */
194{
195 "calls: conditional call",
196 .insns = {
197 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
198 offsetof(struct __sk_buff, mark)),
199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
200 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
201 BPF_MOV64_IMM(BPF_REG_0, 1),
202 BPF_EXIT_INSN(),
203 BPF_MOV64_IMM(BPF_REG_0, 2),
204 BPF_EXIT_INSN(),
205 },
206 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
207 .errstr = "jump out of range",
208 .result = REJECT,
209},
/* Like the previous case, but the call offset (+4) lands on a properly
 * terminated subprog, so the layout is valid and accepted.
 */
210{
211 "calls: conditional call 2",
212 .insns = {
213 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
214 offsetof(struct __sk_buff, mark)),
215 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
217 BPF_MOV64_IMM(BPF_REG_0, 1),
218 BPF_EXIT_INSN(),
219 BPF_MOV64_IMM(BPF_REG_0, 2),
220 BPF_EXIT_INSN(),
221 BPF_MOV64_IMM(BPF_REG_0, 3),
222 BPF_EXIT_INSN(),
223 },
224 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
225 .result = ACCEPT,
226},
/* Backward JA jumps form a loop with no pseudo call involved; rejected
 * only for unprivileged users (back-edge), accepted for privileged ones.
 */
227{
228 "calls: conditional call 3",
229 .insns = {
230 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
231 offsetof(struct __sk_buff, mark)),
232 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
233 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
234 BPF_MOV64_IMM(BPF_REG_0, 1),
235 BPF_EXIT_INSN(),
236 BPF_MOV64_IMM(BPF_REG_0, 1),
237 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
238 BPF_MOV64_IMM(BPF_REG_0, 3),
239 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
240 },
241 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
242 .errstr_unpriv = "back-edge from insn",
243 .result_unpriv = REJECT,
244 .result = ACCEPT,
245 .retval = 1,
246},
/* Backward JA inside the same function body after a call; still a valid
 * control flow, accepted.
 */
247{
248 "calls: conditional call 4",
249 .insns = {
250 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
251 offsetof(struct __sk_buff, mark)),
252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
253 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
254 BPF_MOV64_IMM(BPF_REG_0, 1),
255 BPF_EXIT_INSN(),
256 BPF_MOV64_IMM(BPF_REG_0, 1),
257 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
258 BPF_MOV64_IMM(BPF_REG_0, 3),
259 BPF_EXIT_INSN(),
260 },
261 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
262 .result = ACCEPT,
263},
/* Variant with a JA of -6 that re-enters already-verified code; the
 * resulting flow is bounded and accepted (retval 1).
 */
264{
265 "calls: conditional call 5",
266 .insns = {
267 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
268 offsetof(struct __sk_buff, mark)),
269 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
271 BPF_MOV64_IMM(BPF_REG_0, 1),
272 BPF_EXIT_INSN(),
273 BPF_MOV64_IMM(BPF_REG_0, 1),
274 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
275 BPF_MOV64_IMM(BPF_REG_0, 3),
276 BPF_EXIT_INSN(),
277 },
278 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
279 .result = ACCEPT,
280 .retval = 1,
281},
/* A conditional backward jump around the call creates an unbounded loop
 * whose exit condition never progresses; rejected as an infinite loop.
 */
282{
283 "calls: conditional call 6",
284 .insns = {
285 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
286 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
287 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
288 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
289 BPF_EXIT_INSN(),
290 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
291 offsetof(struct __sk_buff, mark)),
292 BPF_EXIT_INSN(),
293 },
294 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
295 .errstr = "infinite loop detected",
296 .result = REJECT,
297},
/* The caller exits with whatever r0 the callee set (2); a callee's return
 * value is a valid r0 for the caller's exit.
 */
298{
299 "calls: using r0 returned by callee",
300 .insns = {
301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
302 BPF_EXIT_INSN(),
303 BPF_MOV64_IMM(BPF_REG_0, 2),
304 BPF_EXIT_INSN(),
305 },
306 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
307 .result = ACCEPT,
308},
/* Callee exits without ever writing r0; the caller's use of that
 * uninitialized return value must be rejected ("!read_ok").
 */
309{
310 "calls: using uninit r0 from callee",
311 .insns = {
312 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
313 BPF_EXIT_INSN(),
314 BPF_EXIT_INSN(),
315 },
316 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
317 .errstr = "!read_ok",
318 .result = REJECT,
319},
/* r1 (the context) is passed through to the callee, which dereferences
 * skb->len from it; accepted, returning the test packet length.
 */
320{
321 "calls: callee is using r1",
322 .insns = {
323 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
324 BPF_EXIT_INSN(),
325 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
326 offsetof(struct __sk_buff, len)),
327 BPF_EXIT_INSN(),
328 },
329 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
330 .result = ACCEPT,
331 .retval = TEST_DATA_LEN,
332},
/* Callee returns r1 (the ctx pointer) as a scalar; allowed when
 * privileged, refused unprivileged (pointer leak via calls).
 */
333{
334 "calls: callee using args1",
335 .insns = {
336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
337 BPF_EXIT_INSN(),
338 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
339 BPF_EXIT_INSN(),
340 },
341 .errstr_unpriv = "allowed for",
342 .result_unpriv = REJECT,
343 .result = ACCEPT,
344 .retval = POINTER_VALUE,
345},
/* Only r1 is initialized at the call site; the callee reading r2 must be
 * rejected ("R2 !read_ok").
 */
346{
347 "calls: callee using wrong args2",
348 .insns = {
349 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
350 BPF_EXIT_INSN(),
351 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
352 BPF_EXIT_INSN(),
353 },
354 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
355 .errstr = "R2 !read_ok",
356 .result = REJECT,
357},
/* Both r1 and r2 are loaded with skb->len before the call; the callee
 * sums them, so the program returns twice the payload length.
 */
358{
359 "calls: callee using two args",
360 .insns = {
361 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
362 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
363 offsetof(struct __sk_buff, len)),
364 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
365 offsetof(struct __sk_buff, len)),
366 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
367 BPF_EXIT_INSN(),
368 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
369 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
370 BPF_EXIT_INSN(),
371 },
372 .errstr_unpriv = "allowed for",
373 .result_unpriv = REJECT,
374 .result = ACCEPT,
375 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
376},
/* The callee calls bpf_xdp_adjust_head(), which invalidates all packet
 * pointers; the caller's subsequent load through r6 (an old pkt pointer)
 * must be rejected ("R6 invalid mem access 'inv'").
 */
377{
378 "calls: callee changing pkt pointers",
379 .insns = {
380 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
381 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
382 offsetof(struct xdp_md, data_end)),
383 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
385 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
386 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
387 /* clear_all_pkt_pointers() has to walk all frames
388 * to make sure that pkt pointers in the caller
389 * are cleared when callee is calling a helper that
390 * adjusts packet size
391 */
392 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
393 BPF_MOV32_IMM(BPF_REG_0, 0),
394 BPF_EXIT_INSN(),
395 BPF_MOV64_IMM(BPF_REG_2, 0),
396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
397 BPF_EXIT_INSN(),
398 },
399 .result = REJECT,
400 .errstr = "R6 invalid mem access 'inv'",
401 .prog_type = BPF_PROG_TYPE_XDP,
402 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
403},
/* The NULL check on the map-lookup result happens inside the callee
 * (on r1); the caller then branches on the callee's r0 before
 * dereferencing r6, so the deref is properly guarded and accepted.
 * fixup_map_hash_48b patches the map fd at insn 3.
 */
404{
405 "calls: ptr null check in subprog",
406 .insns = {
407 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
408 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
410 BPF_LD_MAP_FD(BPF_REG_1, 0),
411 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
412 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
413 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
414 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
415 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
416 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
417 BPF_EXIT_INSN(),
418 BPF_MOV64_IMM(BPF_REG_0, 0),
419 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
420 BPF_MOV64_IMM(BPF_REG_0, 1),
421 BPF_EXIT_INSN(),
422 },
423 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
424 .fixup_map_hash_48b = { 3 },
425 .result_unpriv = REJECT,
426 .result = ACCEPT,
427 .retval = 0,
428},
/* Nested calls: subprog 1 calls subprog 2 twice (with the saved ctx in
 * r6) and sums the two skb->len reads; accepted, retval is 2 * len.
 */
429{
430 "calls: two calls with args",
431 .insns = {
432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
433 BPF_EXIT_INSN(),
434 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
435 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
436 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
438 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
439 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
440 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
441 BPF_EXIT_INSN(),
442 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
443 offsetof(struct __sk_buff, len)),
444 BPF_EXIT_INSN(),
445 },
446 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
447 .result = ACCEPT,
448 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
449},
/* A caller stack pointer (fp-64) is adjusted further in each callee and
 * finally stored through with an 8-byte-aligned offset; accepted,
 * final store writes 42 which becomes the return value.
 */
450{
451 "calls: calls with stack arith",
452 .insns = {
453 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
456 BPF_EXIT_INSN(),
457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
459 BPF_EXIT_INSN(),
460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
461 BPF_MOV64_IMM(BPF_REG_0, 42),
462 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
463 BPF_EXIT_INSN(),
464 },
465 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
466 .result = ACCEPT,
467 .retval = 42,
468},
/* Same structure with odd offsets (-63, -61) so the final DW store is
 * misaligned; rejected under strict-alignment loading.
 */
469{
470 "calls: calls with misaligned stack access",
471 .insns = {
472 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
475 BPF_EXIT_INSN(),
476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
477 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
478 BPF_EXIT_INSN(),
479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
480 BPF_MOV64_IMM(BPF_REG_0, 42),
481 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
482 BPF_EXIT_INSN(),
483 },
484 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
485 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
486 .errstr = "misaligned stack access",
487 .result = REJECT,
488},
/* Pure jump test, no calls: r0 is set to 42, then rewritten to 43 after
 * the JA detour, and the program exits with 43.
 */
489{
490 "calls: calls control flow, jump test",
491 .insns = {
492 BPF_MOV64_IMM(BPF_REG_0, 42),
493 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
494 BPF_MOV64_IMM(BPF_REG_0, 43),
495 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
496 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
497 BPF_EXIT_INSN(),
498 },
499 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
500 .result = ACCEPT,
501 .retval = 43,
502},
/* Same shape, but the backward edge is a pseudo call (-3) into the middle
 * of the first function; rejected "jump out of range from insn 1 to 4".
 */
503{
504 "calls: calls control flow, jump test 2",
505 .insns = {
506 BPF_MOV64_IMM(BPF_REG_0, 42),
507 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
508 BPF_MOV64_IMM(BPF_REG_0, 43),
509 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
510 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
511 BPF_EXIT_INSN(),
512 },
513 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
514 .errstr = "jump out of range from insn 1 to 4",
515 .result = REJECT,
516},
/* Subprog 2 conditionally jumps backwards (insn 11 -> 9), crossing into
 * subprog 1's body; rejected as a jump out of range between functions.
 */
517{
518 "calls: two calls with bad jump",
519 .insns = {
520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
521 BPF_EXIT_INSN(),
522 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
524 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
525 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
526 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
527 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
528 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
529 BPF_EXIT_INSN(),
530 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
531 offsetof(struct __sk_buff, len)),
532 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
533 BPF_EXIT_INSN(),
534 },
535 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
536 .errstr = "jump out of range from insn 11 to 9",
537 .result = REJECT,
538},
/* Subprog calls itself (offset -1 points back at the call insn);
 * recursion is a back-edge and is rejected.
 */
539{
540 "calls: recursive call. test1",
541 .insns = {
542 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
543 BPF_EXIT_INSN(),
544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
545 BPF_EXIT_INSN(),
546 },
547 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
548 .errstr = "back-edge",
549 .result = REJECT,
550},
/* Mutual/backward recursion: subprog calls back to the main prog's first
 * insn (offset -3); also rejected as a back-edge.
 */
551{
552 "calls: recursive call. test2",
553 .insns = {
554 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
555 BPF_EXIT_INSN(),
556 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
557 BPF_EXIT_INSN(),
558 },
559 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
560 .errstr = "back-edge",
561 .result = REJECT,
562},
/* Insns 6-7 are never reached by any call or jump; the verifier requires
 * full coverage and rejects with "unreachable insn 6".
 */
563{
564 "calls: unreachable code",
565 .insns = {
566 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
567 BPF_EXIT_INSN(),
568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
569 BPF_EXIT_INSN(),
570 BPF_MOV64_IMM(BPF_REG_0, 0),
571 BPF_EXIT_INSN(),
572 BPF_MOV64_IMM(BPF_REG_0, 0),
573 BPF_EXIT_INSN(),
574 },
575 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
576 .errstr = "unreachable insn 6",
577 .result = REJECT,
578},
/* Call target -4 lies before the start of the program: invalid
 * destination.
 */
579{
580 "calls: invalid call",
581 .insns = {
582 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
583 BPF_EXIT_INSN(),
584 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
585 BPF_EXIT_INSN(),
586 },
587 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
588 .errstr = "invalid destination",
589 .result = REJECT,
590},
/* Call target 0x7fffffff lies far past the end of the program: also an
 * invalid destination.
 */
591{
592 "calls: invalid call 2",
593 .insns = {
594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
595 BPF_EXIT_INSN(),
596 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
597 BPF_EXIT_INSN(),
598 },
599 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
600 .errstr = "invalid destination",
601 .result = REJECT,
602},
/* The callee conditionally jumps back (-3) into the caller's body;
 * jumps may not cross function boundaries -> "jump out of range".
 */
603{
604 "calls: jumping across function bodies. test1",
605 .insns = {
606 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
607 BPF_MOV64_IMM(BPF_REG_0, 0),
608 BPF_EXIT_INSN(),
609 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
610 BPF_EXIT_INSN(),
611 },
612 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
613 .errstr = "jump out of range",
614 .result = REJECT,
615},
/* The caller's leading branch (+3) jumps forward into the callee's body;
 * likewise rejected.
 */
616{
617 "calls: jumping across function bodies. test2",
618 .insns = {
619 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
621 BPF_MOV64_IMM(BPF_REG_0, 0),
622 BPF_EXIT_INSN(),
623 BPF_EXIT_INSN(),
624 },
625 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
626 .errstr = "jump out of range",
627 .result = REJECT,
628},
/* The last subprog ends in a conditional jump instead of an exit or
 * unconditional jmp; rejected ("not an exit").
 */
629{
630 "calls: call without exit",
631 .insns = {
632 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
633 BPF_EXIT_INSN(),
634 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
635 BPF_EXIT_INSN(),
636 BPF_MOV64_IMM(BPF_REG_0, 0),
637 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
638 },
639 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
640 .errstr = "not an exit",
641 .result = REJECT,
642},
/* The second call targets the second half of a BPF_LD_IMM64 (a 16-byte,
 * two-slot insn); splitting an insn like that leaves a subprog whose
 * "last insn" check fails.
 */
643{
644 "calls: call into middle of ld_imm64",
645 .insns = {
646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
647 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
648 BPF_MOV64_IMM(BPF_REG_0, 0),
649 BPF_EXIT_INSN(),
650 BPF_LD_IMM64(BPF_REG_0, 0),
651 BPF_EXIT_INSN(),
652 },
653 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
654 .errstr = "last insn",
655 .result = REJECT,
656},
/* Similar boundary violation: the second call lands between the insns of
 * another subprog, so no valid function partition exists.
 */
657{
658 "calls: call into middle of other call",
659 .insns = {
660 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
662 BPF_MOV64_IMM(BPF_REG_0, 0),
663 BPF_EXIT_INSN(),
664 BPF_MOV64_IMM(BPF_REG_0, 0),
665 BPF_MOV64_IMM(BPF_REG_0, 0),
666 BPF_EXIT_INSN(),
667 },
668 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
669 .errstr = "last insn",
670 .result = REJECT,
671},
/* LD_ABS insns in the main prog surround a call to a subprog that uses
 * the bpf_skb_vlan_push() helper; mixing LD_ABS with calls this way must
 * still verify (ACCEPT).
 */
672{
673 "calls: subprog call with ld_abs in main prog",
674 .insns = {
675 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
676 BPF_LD_ABS(BPF_B, 0),
677 BPF_LD_ABS(BPF_H, 0),
678 BPF_LD_ABS(BPF_W, 0),
679 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
680 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
682 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
683 BPF_LD_ABS(BPF_B, 0),
684 BPF_LD_ABS(BPF_H, 0),
685 BPF_LD_ABS(BPF_W, 0),
686 BPF_EXIT_INSN(),
687 BPF_MOV64_IMM(BPF_REG_2, 1),
688 BPF_MOV64_IMM(BPF_REG_3, 2),
689 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
690 BPF_EXIT_INSN(),
691 },
692 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
693 .result = ACCEPT,
694},
/* Subprog 1 falls through into the next subprog instead of exiting (no
 * EXIT before insn 10's body); rejected ("not an exit").
 */
695{
696 "calls: two calls with bad fallthrough",
697 .insns = {
698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
699 BPF_EXIT_INSN(),
700 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
702 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
703 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
705 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
706 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
708 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
709 offsetof(struct __sk_buff, len)),
710 BPF_EXIT_INSN(),
711 },
712 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
713 .errstr = "not an exit",
714 .result = REJECT,
715},
/* The main prog passes a pointer to its own (initialized) stack slot
 * down two call levels; the innermost subprog reads through it, which is
 * a legal cross-frame stack read (ACCEPT).
 */
716{
717 "calls: two calls with stack read",
718 .insns = {
719 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
720 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
722 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
723 BPF_EXIT_INSN(),
724 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
725 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
726 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
727 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
729 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
730 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
731 BPF_EXIT_INSN(),
732 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
733 BPF_EXIT_INSN(),
734 },
735 .prog_type = BPF_PROG_TYPE_XDP,
736 .result = ACCEPT,
737},
/* Subprog 1 writes through a pointer into the main prog's stack frame
 * (fp-16) and subprog 2 reads from the other slot (fp-8); cross-frame
 * stack writes to caller frames are allowed (ACCEPT).
 */
738{
739 "calls: two calls with stack write",
740 .insns = {
741 /* main prog */
742 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
743 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
744 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
745 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
747 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
748 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
749 BPF_EXIT_INSN(),
750
751 /* subprog 1 */
752 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
753 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
754 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
755 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
756 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
757 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
758 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
759 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
760 /* write into stack frame of main prog */
761 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
762 BPF_EXIT_INSN(),
763
764 /* subprog 2 */
765 /* read from stack frame of main prog */
766 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
767 BPF_EXIT_INSN(),
768 },
769 .prog_type = BPF_PROG_TYPE_XDP,
770 .result = ACCEPT,
771},
/* Both frames use 300 bytes of stack (the caller's use happens before
 * the call); combined depth exceeds the limit -> REJECT.
 */
772{
773 "calls: stack overflow using two frames (pre-call access)",
774 .insns = {
775 /* prog 1 */
776 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
777 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
778 BPF_EXIT_INSN(),
779
780 /* prog 2 */
781 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
782 BPF_MOV64_IMM(BPF_REG_0, 0),
783 BPF_EXIT_INSN(),
784 },
785 .prog_type = BPF_PROG_TYPE_XDP,
786 .errstr = "combined stack size",
787 .result = REJECT,
788},
/* Same overflow, but the caller touches its 300 bytes only after the
 * call returns; the combined-stack check must still catch it.
 */
789{
790 "calls: stack overflow using two frames (post-call access)",
791 .insns = {
792 /* prog 1 */
793 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
794 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
795 BPF_EXIT_INSN(),
796
797 /* prog 2 */
798 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
799 BPF_MOV64_IMM(BPF_REG_0, 0),
800 BPF_EXIT_INSN(),
801 },
802 .prog_type = BPF_PROG_TYPE_XDP,
803 .errstr = "combined stack size",
804 .result = REJECT,
805},
/* Three-frame stack accounting: main(32) -> A(256), then main -> B(64)
 * -> A(256); the deepest chain stays under the 512-byte limit -> ACCEPT.
 */
806{
807 "calls: stack depth check using three frames. test1",
808 .insns = {
809 /* main */
810 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
811 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
812 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
813 BPF_MOV64_IMM(BPF_REG_0, 0),
814 BPF_EXIT_INSN(),
815 /* A */
816 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
817 BPF_EXIT_INSN(),
818 /* B */
819 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
820 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
821 BPF_EXIT_INSN(),
822 },
823 .prog_type = BPF_PROG_TYPE_XDP,
824 /* stack_main=32, stack_A=256, stack_B=64
825 * and max(main+A, main+A+B) < 512
826 */
827 .result = ACCEPT,
828},
/* Same shape with A/B depths swapped (A=64, B=256); deepest chain is
 * main+B+A = 352, still under the limit -> ACCEPT.
 */
829{
830 "calls: stack depth check using three frames. test2",
831 .insns = {
832 /* main */
833 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
834 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
835 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
836 BPF_MOV64_IMM(BPF_REG_0, 0),
837 BPF_EXIT_INSN(),
838 /* A */
839 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
840 BPF_EXIT_INSN(),
841 /* B */
842 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
843 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
844 BPF_EXIT_INSN(),
845 },
846 .prog_type = BPF_PROG_TYPE_XDP,
847 /* stack_main=32, stack_A=64, stack_B=256
848 * and max(main+A, main+A+B) < 512
849 */
850 .result = ACCEPT,
851},
/* With main=64, A=224, B=256 the chain main+B+A exceeds 512 bytes;
 * the verifier must reject with the "combined stack" error.
 */
852{
853 "calls: stack depth check using three frames. test3",
854 .insns = {
855 /* main */
856 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
857 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
858 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
859 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
860 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
861 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
862 BPF_MOV64_IMM(BPF_REG_0, 0),
863 BPF_EXIT_INSN(),
864 /* A */
865 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
866 BPF_EXIT_INSN(),
867 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
868 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
869 /* B */
870 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
871 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
872 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
873 BPF_EXIT_INSN(),
874 },
875 .prog_type = BPF_PROG_TYPE_XDP,
876 /* stack_main=64, stack_A=224, stack_B=256
877 * and max(main+A, main+A+B) > 512
878 */
879 .errstr = "combined stack",
880 .result = REJECT,
881},
/* func1 either allocates 300 bytes itself or recurses into func2, which
 * may allocate another 300; the worst-case combined stack exceeds the
 * limit, so the load is rejected (pseudo-code of the shape below).
 */
882{
883 "calls: stack depth check using three frames. test4",
884 /* void main(void) {
885 * func1(0);
886 * func1(1);
887 * func2(1);
888 * }
889 * void func1(int alloc_or_recurse) {
890 * if (alloc_or_recurse) {
891 * frame_pointer[-300] = 1;
892 * } else {
893 * func2(alloc_or_recurse);
894 * }
895 * }
896 * void func2(int alloc_or_recurse) {
897 * if (alloc_or_recurse) {
898 * frame_pointer[-300] = 1;
899 * }
900 * }
901 */
902 .insns = {
903 /* main */
904 BPF_MOV64_IMM(BPF_REG_1, 0),
905 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
906 BPF_MOV64_IMM(BPF_REG_1, 1),
907 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
908 BPF_MOV64_IMM(BPF_REG_1, 1),
909 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
910 BPF_MOV64_IMM(BPF_REG_0, 0),
911 BPF_EXIT_INSN(),
912 /* A */
913 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
914 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
915 BPF_EXIT_INSN(),
916 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
917 BPF_EXIT_INSN(),
918 /* B */
919 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
920 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
921 BPF_EXIT_INSN(),
922 },
923 .prog_type = BPF_PROG_TYPE_XDP,
924 .result = REJECT,
925 .errstr = "combined stack",
926},
/* An eight-deep call chain (main -> A -> ... -> H) exceeds the maximum
 * call frame depth, independent of per-frame stack usage ("call stack").
 */
927{
928 "calls: stack depth check using three frames. test5",
929 .insns = {
930 /* main */
931 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
932 BPF_EXIT_INSN(),
933 /* A */
934 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
935 BPF_EXIT_INSN(),
936 /* B */
937 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
938 BPF_EXIT_INSN(),
939 /* C */
940 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
941 BPF_EXIT_INSN(),
942 /* D */
943 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
944 BPF_EXIT_INSN(),
945 /* E */
946 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
947 BPF_EXIT_INSN(),
948 /* F */
949 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
950 BPF_EXIT_INSN(),
951 /* G */
952 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
953 BPF_EXIT_INSN(),
954 /* H */
955 BPF_MOV64_IMM(BPF_REG_0, 0),
956 BPF_EXIT_INSN(),
957 },
958 .prog_type = BPF_PROG_TYPE_XDP,
959 .errstr = "call stack",
960 .result = REJECT,
961},
/* The deep call chain (B..H) is only reachable through a branch that is
 * dead for r1=0, yet the depth check still applies and the program is
 * rejected ("call stack").
 */
962{
963 "calls: stack depth check in dead code",
964 .insns = {
965 /* main */
966 BPF_MOV64_IMM(BPF_REG_1, 0),
967 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
968 BPF_EXIT_INSN(),
969 /* A */
970 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
971 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
972 BPF_MOV64_IMM(BPF_REG_0, 0),
973 BPF_EXIT_INSN(),
974 /* B */
975 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
976 BPF_EXIT_INSN(),
977 /* C */
978 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
979 BPF_EXIT_INSN(),
980 /* D */
981 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
982 BPF_EXIT_INSN(),
983 /* E */
984 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
985 BPF_EXIT_INSN(),
986 /* F */
987 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
988 BPF_EXIT_INSN(),
989 /* G */
990 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
991 BPF_EXIT_INSN(),
992 /* H */
993 BPF_MOV64_IMM(BPF_REG_0, 0),
994 BPF_EXIT_INSN(),
995 },
996 .prog_type = BPF_PROG_TYPE_XDP,
997 .errstr = "call stack",
998 .result = REJECT,
999},
/* The callee tries to spill a pointer (r1, a caller stack pointer) into
 * the caller's stack frame; disallowed ("cannot spill").
 */
1000{
1001 "calls: spill into caller stack frame",
1002 .insns = {
1003 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1004 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1006 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1007 BPF_EXIT_INSN(),
1008 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1009 BPF_MOV64_IMM(BPF_REG_0, 0),
1010 BPF_EXIT_INSN(),
1011 },
1012 .prog_type = BPF_PROG_TYPE_XDP,
1013 .errstr = "cannot spill",
1014 .result = REJECT,
1015},
/* Writing a scalar (42) through a caller stack pointer is fine; the
 * caller then reads it back as its return value.
 */
1016{
1017 "calls: write into caller stack frame",
1018 .insns = {
1019 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1020 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1021 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1022 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1023 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1024 BPF_EXIT_INSN(),
1025 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
1026 BPF_MOV64_IMM(BPF_REG_0, 0),
1027 BPF_EXIT_INSN(),
1028 },
1029 .prog_type = BPF_PROG_TYPE_XDP,
1030 .result = ACCEPT,
1031 .retval = 42,
1032},
/* The callee returns a pointer into its own (dying) stack frame, which
 * the caller then writes through; rejected ("cannot return stack
 * pointer").
 */
1033{
1034 "calls: write into callee stack frame",
1035 .insns = {
1036 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1037 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
1038 BPF_EXIT_INSN(),
1039 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
1041 BPF_EXIT_INSN(),
1042 },
1043 .prog_type = BPF_PROG_TYPE_XDP,
1044 .errstr = "cannot return stack pointer",
1045 .result = REJECT,
1046},
/* Subprog 2 writes into the main prog's stack frame and exits without
 * setting r0 ("void return"); that is fine because its caller never
 * reads r0 afterwards -> ACCEPT.
 */
1047{
1048 "calls: two calls with stack write and void return",
1049 .insns = {
1050 /* main prog */
1051 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1052 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1053 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1054 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1056 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1057 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1058 BPF_EXIT_INSN(),
1059
1060 /* subprog 1 */
1061 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1062 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1063 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1064 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1065 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1066 BPF_EXIT_INSN(),
1067
1068 /* subprog 2 */
1069 /* write into stack frame of main prog */
1070 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
1071 BPF_EXIT_INSN(), /* void return */
1072 },
1073 .prog_type = BPF_PROG_TYPE_XDP,
1074 .result = ACCEPT,
1075},
/* Only one of the two call paths sets r0 before it is copied to r1 and
 * branched on; the caller's use of the possibly-unset return value is
 * rejected ("R0 !read_ok"), and unprivileged loads are refused outright.
 */
1076{
1077 "calls: ambiguous return value",
1078 .insns = {
1079 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1080 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1081 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1082 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1083 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1084 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1085 BPF_EXIT_INSN(),
1086 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1087 BPF_MOV64_IMM(BPF_REG_0, 0),
1088 BPF_EXIT_INSN(),
1089 },
1090 .errstr_unpriv = "allowed for",
1091 .result_unpriv = REJECT,
1092 .errstr = "R0 !read_ok",
1093 .result = REJECT,
1094},
1095{
1096 "calls: two calls that return map_value",
1097 .insns = {
1098 /* main prog */
1099 /* pass fp-16, fp-8 into a function */
1100 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1102 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1104 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
1105
1106 /* fetch map_value_ptr from the stack of this function */
1107 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1109 /* write into map value */
1110 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1111 /* fetch secound map_value_ptr from the stack */
1112 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1114 /* write into map value */
1115 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1116 BPF_MOV64_IMM(BPF_REG_0, 0),
1117 BPF_EXIT_INSN(),
1118
1119 /* subprog 1 */
1120 /* call 3rd function twice */
1121 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1122 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1123 /* first time with fp-8 */
1124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1125 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1126 /* second time with fp-16 */
1127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1128 BPF_EXIT_INSN(),
1129
1130 /* subprog 2 */
1131 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1132 /* lookup from map */
1133 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1134 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1135 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1136 BPF_LD_MAP_FD(BPF_REG_1, 0),
1137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1138 /* write map_value_ptr into stack frame of main prog */
1139 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1140 BPF_MOV64_IMM(BPF_REG_0, 0),
1141 BPF_EXIT_INSN(), /* return 0 */
1142 },
1143 .prog_type = BPF_PROG_TYPE_XDP,
1144 .fixup_map_hash_8b = { 23 },
1145 .result = ACCEPT,
1146},
1147{
1148 "calls: two calls that return map_value with bool condition",
1149 .insns = {
1150 /* main prog */
1151 /* pass fp-16, fp-8 into a function */
1152 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1154 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1155 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1157 BPF_MOV64_IMM(BPF_REG_0, 0),
1158 BPF_EXIT_INSN(),
1159
1160 /* subprog 1 */
1161 /* call 3rd function twice */
1162 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1163 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1164 /* first time with fp-8 */
1165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1166 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1167 /* fetch map_value_ptr from the stack of this function */
1168 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1169 /* write into map value */
1170 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1172 /* second time with fp-16 */
1173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1174 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1175 /* fetch secound map_value_ptr from the stack */
1176 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1177 /* write into map value */
1178 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1179 BPF_EXIT_INSN(),
1180
1181 /* subprog 2 */
1182 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1183 /* lookup from map */
1184 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1185 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1187 BPF_LD_MAP_FD(BPF_REG_1, 0),
1188 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1189 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1190 BPF_MOV64_IMM(BPF_REG_0, 0),
1191 BPF_EXIT_INSN(), /* return 0 */
1192 /* write map_value_ptr into stack frame of main prog */
1193 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1194 BPF_MOV64_IMM(BPF_REG_0, 1),
1195 BPF_EXIT_INSN(), /* return 1 */
1196 },
1197 .prog_type = BPF_PROG_TYPE_XDP,
1198 .fixup_map_hash_8b = { 23 },
1199 .result = ACCEPT,
1200},
1201{
1202 "calls: two calls that return map_value with incorrect bool check",
1203 .insns = {
1204 /* main prog */
1205 /* pass fp-16, fp-8 into a function */
1206 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1207 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1208 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1209 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1211 BPF_MOV64_IMM(BPF_REG_0, 0),
1212 BPF_EXIT_INSN(),
1213
1214 /* subprog 1 */
1215 /* call 3rd function twice */
1216 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1217 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1218 /* first time with fp-8 */
1219 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1220 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1221 /* fetch map_value_ptr from the stack of this function */
1222 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1223 /* write into map value */
1224 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1225 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1226 /* second time with fp-16 */
1227 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1228 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1229 /* fetch secound map_value_ptr from the stack */
1230 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1231 /* write into map value */
1232 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1233 BPF_EXIT_INSN(),
1234
1235 /* subprog 2 */
1236 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1237 /* lookup from map */
1238 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1239 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1241 BPF_LD_MAP_FD(BPF_REG_1, 0),
1242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1243 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1244 BPF_MOV64_IMM(BPF_REG_0, 0),
1245 BPF_EXIT_INSN(), /* return 0 */
1246 /* write map_value_ptr into stack frame of main prog */
1247 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1248 BPF_MOV64_IMM(BPF_REG_0, 1),
1249 BPF_EXIT_INSN(), /* return 1 */
1250 },
1251 .prog_type = BPF_PROG_TYPE_XDP,
1252 .fixup_map_hash_8b = { 23 },
1253 .result = REJECT,
1254 .errstr = "invalid read from stack R7 off=-16 size=8",
1255},
1256{
1257 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
1258 .insns = {
1259 /* main prog */
1260 /* pass fp-16, fp-8 into a function */
1261 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1262 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1263 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1266 BPF_MOV64_IMM(BPF_REG_0, 0),
1267 BPF_EXIT_INSN(),
1268
1269 /* subprog 1 */
1270 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1271 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1272 /* 1st lookup from map */
1273 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1274 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1275 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1276 BPF_LD_MAP_FD(BPF_REG_1, 0),
1277 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1278 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1279 BPF_MOV64_IMM(BPF_REG_8, 0),
1280 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1281 /* write map_value_ptr into stack frame of main prog at fp-8 */
1282 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1283 BPF_MOV64_IMM(BPF_REG_8, 1),
1284
1285 /* 2nd lookup from map */
1286 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1288 BPF_LD_MAP_FD(BPF_REG_1, 0),
1289 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1290 BPF_FUNC_map_lookup_elem),
1291 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1292 BPF_MOV64_IMM(BPF_REG_9, 0),
1293 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1294 /* write map_value_ptr into stack frame of main prog at fp-16 */
1295 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1296 BPF_MOV64_IMM(BPF_REG_9, 1),
1297
1298 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1299 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1300 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1301 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1302 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1303 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1304 BPF_EXIT_INSN(),
1305
1306 /* subprog 2 */
1307 /* if arg2 == 1 do *arg1 = 0 */
1308 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1309 /* fetch map_value_ptr from the stack of this function */
1310 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1311 /* write into map value */
1312 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1313
1314 /* if arg4 == 1 do *arg3 = 0 */
1315 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1316 /* fetch map_value_ptr from the stack of this function */
1317 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1318 /* write into map value */
1319 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
1320 BPF_EXIT_INSN(),
1321 },
1322 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1323 .fixup_map_hash_8b = { 12, 22 },
1324 .result = REJECT,
1325 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1326 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1327},
1328{
1329 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
1330 .insns = {
1331 /* main prog */
1332 /* pass fp-16, fp-8 into a function */
1333 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1335 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1338 BPF_MOV64_IMM(BPF_REG_0, 0),
1339 BPF_EXIT_INSN(),
1340
1341 /* subprog 1 */
1342 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1343 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1344 /* 1st lookup from map */
1345 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1346 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1348 BPF_LD_MAP_FD(BPF_REG_1, 0),
1349 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1350 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1351 BPF_MOV64_IMM(BPF_REG_8, 0),
1352 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1353 /* write map_value_ptr into stack frame of main prog at fp-8 */
1354 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1355 BPF_MOV64_IMM(BPF_REG_8, 1),
1356
1357 /* 2nd lookup from map */
1358 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1360 BPF_LD_MAP_FD(BPF_REG_1, 0),
1361 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1362 BPF_FUNC_map_lookup_elem),
1363 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1364 BPF_MOV64_IMM(BPF_REG_9, 0),
1365 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1366 /* write map_value_ptr into stack frame of main prog at fp-16 */
1367 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1368 BPF_MOV64_IMM(BPF_REG_9, 1),
1369
1370 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1371 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1372 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1373 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1374 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1376 BPF_EXIT_INSN(),
1377
1378 /* subprog 2 */
1379 /* if arg2 == 1 do *arg1 = 0 */
1380 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1381 /* fetch map_value_ptr from the stack of this function */
1382 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1383 /* write into map value */
1384 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1385
1386 /* if arg4 == 1 do *arg3 = 0 */
1387 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1388 /* fetch map_value_ptr from the stack of this function */
1389 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1390 /* write into map value */
1391 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1392 BPF_EXIT_INSN(),
1393 },
1394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1395 .fixup_map_hash_8b = { 12, 22 },
1396 .result = ACCEPT,
1397},
1398{
1399 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
1400 .insns = {
1401 /* main prog */
1402 /* pass fp-16, fp-8 into a function */
1403 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1405 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1407 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
1408 BPF_MOV64_IMM(BPF_REG_0, 0),
1409 BPF_EXIT_INSN(),
1410
1411 /* subprog 1 */
1412 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1413 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1414 /* 1st lookup from map */
1415 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
1416 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1418 BPF_LD_MAP_FD(BPF_REG_1, 0),
1419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1420 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1421 BPF_MOV64_IMM(BPF_REG_8, 0),
1422 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1423 /* write map_value_ptr into stack frame of main prog at fp-8 */
1424 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1425 BPF_MOV64_IMM(BPF_REG_8, 1),
1426
1427 /* 2nd lookup from map */
1428 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1430 BPF_LD_MAP_FD(BPF_REG_1, 0),
1431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1432 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1433 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
1434 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1435 /* write map_value_ptr into stack frame of main prog at fp-16 */
1436 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1437 BPF_MOV64_IMM(BPF_REG_9, 1),
1438
1439 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1440 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
1441 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1442 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1443 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1444 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
1445 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
1446
1447 /* subprog 2 */
1448 /* if arg2 == 1 do *arg1 = 0 */
1449 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1450 /* fetch map_value_ptr from the stack of this function */
1451 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1452 /* write into map value */
1453 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1454
1455 /* if arg4 == 1 do *arg3 = 0 */
1456 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1457 /* fetch map_value_ptr from the stack of this function */
1458 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1459 /* write into map value */
1460 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
1461 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
1462 },
1463 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1464 .fixup_map_hash_8b = { 12, 22 },
1465 .result = REJECT,
1466 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1467 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1468},
1469{
1470 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
1471 .insns = {
1472 /* main prog */
1473 /* pass fp-16, fp-8 into a function */
1474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1476 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1478 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1479 BPF_MOV64_IMM(BPF_REG_0, 0),
1480 BPF_EXIT_INSN(),
1481
1482 /* subprog 1 */
1483 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1484 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1485 /* 1st lookup from map */
1486 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1487 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1489 BPF_LD_MAP_FD(BPF_REG_1, 0),
1490 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1491 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1492 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1493 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1494 BPF_MOV64_IMM(BPF_REG_8, 0),
1495 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1496 BPF_MOV64_IMM(BPF_REG_8, 1),
1497
1498 /* 2nd lookup from map */
1499 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1500 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1501 BPF_LD_MAP_FD(BPF_REG_1, 0),
1502 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1503 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1504 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1505 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1506 BPF_MOV64_IMM(BPF_REG_9, 0),
1507 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1508 BPF_MOV64_IMM(BPF_REG_9, 1),
1509
1510 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1511 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1512 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1513 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1514 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1515 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1516 BPF_EXIT_INSN(),
1517
1518 /* subprog 2 */
1519 /* if arg2 == 1 do *arg1 = 0 */
1520 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1521 /* fetch map_value_ptr from the stack of this function */
1522 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1523 /* write into map value */
1524 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1525
1526 /* if arg4 == 1 do *arg3 = 0 */
1527 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1528 /* fetch map_value_ptr from the stack of this function */
1529 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1530 /* write into map value */
1531 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1532 BPF_EXIT_INSN(),
1533 },
1534 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1535 .fixup_map_hash_8b = { 12, 22 },
1536 .result = ACCEPT,
1537},
1538{
1539 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
1540 .insns = {
1541 /* main prog */
1542 /* pass fp-16, fp-8 into a function */
1543 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1544 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1545 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1547 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1548 BPF_MOV64_IMM(BPF_REG_0, 0),
1549 BPF_EXIT_INSN(),
1550
1551 /* subprog 1 */
1552 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1553 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1554 /* 1st lookup from map */
1555 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1556 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1558 BPF_LD_MAP_FD(BPF_REG_1, 0),
1559 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1560 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1561 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1562 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1563 BPF_MOV64_IMM(BPF_REG_8, 0),
1564 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1565 BPF_MOV64_IMM(BPF_REG_8, 1),
1566
1567 /* 2nd lookup from map */
1568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1570 BPF_LD_MAP_FD(BPF_REG_1, 0),
1571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1572 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1573 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1574 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1575 BPF_MOV64_IMM(BPF_REG_9, 0),
1576 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1577 BPF_MOV64_IMM(BPF_REG_9, 1),
1578
1579 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1580 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1581 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1582 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1583 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1584 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1585 BPF_EXIT_INSN(),
1586
1587 /* subprog 2 */
1588 /* if arg2 == 1 do *arg1 = 0 */
1589 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1590 /* fetch map_value_ptr from the stack of this function */
1591 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1592 /* write into map value */
1593 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1594
1595 /* if arg4 == 0 do *arg3 = 0 */
1596 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
1597 /* fetch map_value_ptr from the stack of this function */
1598 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1599 /* write into map value */
1600 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1601 BPF_EXIT_INSN(),
1602 },
1603 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1604 .fixup_map_hash_8b = { 12, 22 },
1605 .result = REJECT,
1606 .errstr = "R0 invalid mem access 'inv'",
1607},
1608{
1609 "calls: pkt_ptr spill into caller stack",
1610 .insns = {
1611 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1613 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1614 BPF_EXIT_INSN(),
1615
1616 /* subprog 1 */
1617 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1618 offsetof(struct __sk_buff, data)),
1619 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1620 offsetof(struct __sk_buff, data_end)),
1621 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1623 /* spill unchecked pkt_ptr into stack of caller */
1624 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1625 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1626 /* now the pkt range is verified, read pkt_ptr from stack */
1627 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1628 /* write 4 bytes into packet */
1629 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1630 BPF_EXIT_INSN(),
1631 },
1632 .result = ACCEPT,
1633 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1634 .retval = POINTER_VALUE,
1635 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1636},
1637{
1638 "calls: pkt_ptr spill into caller stack 2",
1639 .insns = {
1640 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1641 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1643 /* Marking is still kept, but not in all cases safe. */
1644 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1645 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1646 BPF_EXIT_INSN(),
1647
1648 /* subprog 1 */
1649 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1650 offsetof(struct __sk_buff, data)),
1651 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1652 offsetof(struct __sk_buff, data_end)),
1653 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1655 /* spill unchecked pkt_ptr into stack of caller */
1656 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1657 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1658 /* now the pkt range is verified, read pkt_ptr from stack */
1659 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1660 /* write 4 bytes into packet */
1661 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1662 BPF_EXIT_INSN(),
1663 },
1664 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1665 .errstr = "invalid access to packet",
1666 .result = REJECT,
1667 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1668},
1669{
1670 "calls: pkt_ptr spill into caller stack 3",
1671 .insns = {
1672 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1673 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1675 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1676 /* Marking is still kept and safe here. */
1677 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1678 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1679 BPF_EXIT_INSN(),
1680
1681 /* subprog 1 */
1682 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1683 offsetof(struct __sk_buff, data)),
1684 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1685 offsetof(struct __sk_buff, data_end)),
1686 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1688 /* spill unchecked pkt_ptr into stack of caller */
1689 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1690 BPF_MOV64_IMM(BPF_REG_5, 0),
1691 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1692 BPF_MOV64_IMM(BPF_REG_5, 1),
1693 /* now the pkt range is verified, read pkt_ptr from stack */
1694 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1695 /* write 4 bytes into packet */
1696 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1697 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1698 BPF_EXIT_INSN(),
1699 },
1700 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1701 .result = ACCEPT,
1702 .retval = 1,
1703 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1704},
1705{
1706 "calls: pkt_ptr spill into caller stack 4",
1707 .insns = {
1708 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1709 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1710 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1711 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1712 /* Check marking propagated. */
1713 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1714 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1715 BPF_EXIT_INSN(),
1716
1717 /* subprog 1 */
1718 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1719 offsetof(struct __sk_buff, data)),
1720 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1721 offsetof(struct __sk_buff, data_end)),
1722 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1724 /* spill unchecked pkt_ptr into stack of caller */
1725 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1726 BPF_MOV64_IMM(BPF_REG_5, 0),
1727 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1728 BPF_MOV64_IMM(BPF_REG_5, 1),
1729 /* don't read back pkt_ptr from stack here */
1730 /* write 4 bytes into packet */
1731 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1732 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1733 BPF_EXIT_INSN(),
1734 },
1735 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1736 .result = ACCEPT,
1737 .retval = 1,
1738 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1739},
1740{
1741 "calls: pkt_ptr spill into caller stack 5",
1742 .insns = {
1743 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1744 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1745 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
1746 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1747 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1748 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1749 BPF_EXIT_INSN(),
1750
1751 /* subprog 1 */
1752 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1753 offsetof(struct __sk_buff, data)),
1754 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1755 offsetof(struct __sk_buff, data_end)),
1756 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1758 BPF_MOV64_IMM(BPF_REG_5, 0),
1759 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1760 /* spill checked pkt_ptr into stack of caller */
1761 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1762 BPF_MOV64_IMM(BPF_REG_5, 1),
1763 /* don't read back pkt_ptr from stack here */
1764 /* write 4 bytes into packet */
1765 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1766 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1767 BPF_EXIT_INSN(),
1768 },
1769 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1770 .errstr = "same insn cannot be used with different",
1771 .result = REJECT,
1772 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1773},
1774{
1775 "calls: pkt_ptr spill into caller stack 6",
1776 .insns = {
1777 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1778 offsetof(struct __sk_buff, data_end)),
1779 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1781 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1782 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1783 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1784 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1785 BPF_EXIT_INSN(),
1786
1787 /* subprog 1 */
1788 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1789 offsetof(struct __sk_buff, data)),
1790 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1791 offsetof(struct __sk_buff, data_end)),
1792 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1794 BPF_MOV64_IMM(BPF_REG_5, 0),
1795 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1796 /* spill checked pkt_ptr into stack of caller */
1797 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1798 BPF_MOV64_IMM(BPF_REG_5, 1),
1799 /* don't read back pkt_ptr from stack here */
1800 /* write 4 bytes into packet */
1801 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1802 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1803 BPF_EXIT_INSN(),
1804 },
1805 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1806 .errstr = "R4 invalid mem access",
1807 .result = REJECT,
1808 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1809},
1810{
1811 "calls: pkt_ptr spill into caller stack 7",
1812 .insns = {
1813 BPF_MOV64_IMM(BPF_REG_2, 0),
1814 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1816 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1817 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1818 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1819 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1820 BPF_EXIT_INSN(),
1821
1822 /* subprog 1 */
1823 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1824 offsetof(struct __sk_buff, data)),
1825 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1826 offsetof(struct __sk_buff, data_end)),
1827 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1829 BPF_MOV64_IMM(BPF_REG_5, 0),
1830 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1831 /* spill checked pkt_ptr into stack of caller */
1832 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1833 BPF_MOV64_IMM(BPF_REG_5, 1),
1834 /* don't read back pkt_ptr from stack here */
1835 /* write 4 bytes into packet */
1836 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1837 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1838 BPF_EXIT_INSN(),
1839 },
1840 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1841 .errstr = "R4 invalid mem access",
1842 .result = REJECT,
1843 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1844},
1845{
1846 "calls: pkt_ptr spill into caller stack 8",
1847 .insns = {
1848 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1849 offsetof(struct __sk_buff, data)),
1850 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1851 offsetof(struct __sk_buff, data_end)),
1852 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1854 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1855 BPF_EXIT_INSN(),
1856 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1857 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1858 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1859 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1860 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1861 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1862 BPF_EXIT_INSN(),
1863
1864 /* subprog 1 */
1865 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1866 offsetof(struct __sk_buff, data)),
1867 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1868 offsetof(struct __sk_buff, data_end)),
1869 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1871 BPF_MOV64_IMM(BPF_REG_5, 0),
1872 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1873 /* spill checked pkt_ptr into stack of caller */
1874 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1875 BPF_MOV64_IMM(BPF_REG_5, 1),
1876 /* don't read back pkt_ptr from stack here */
1877 /* write 4 bytes into packet */
1878 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1879 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1880 BPF_EXIT_INSN(),
1881 },
1882 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1883 .result = ACCEPT,
1884 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1885},
1886{
1887 "calls: pkt_ptr spill into caller stack 9",
1888 .insns = {
1889 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1890 offsetof(struct __sk_buff, data)),
1891 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1892 offsetof(struct __sk_buff, data_end)),
1893 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1895 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1896 BPF_EXIT_INSN(),
1897 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1899 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1900 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1901 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1902 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1903 BPF_EXIT_INSN(),
1904
1905 /* subprog 1 */
1906 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1907 offsetof(struct __sk_buff, data)),
1908 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1909 offsetof(struct __sk_buff, data_end)),
1910 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1912 BPF_MOV64_IMM(BPF_REG_5, 0),
1913 /* spill unchecked pkt_ptr into stack of caller */
1914 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1915 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1916 BPF_MOV64_IMM(BPF_REG_5, 1),
1917 /* don't read back pkt_ptr from stack here */
1918 /* write 4 bytes into packet */
1919 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1920 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1921 BPF_EXIT_INSN(),
1922 },
1923 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1924 .errstr = "invalid access to packet",
1925 .result = REJECT,
1926 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1927},
1928{
1929 "calls: caller stack init to zero or map_value_or_null",
1930 .insns = {
1931 BPF_MOV64_IMM(BPF_REG_0, 0),
1932 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
1933 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1934 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1935 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1936 /* fetch map_value_or_null or const_zero from stack */
1937 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1938 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1939 /* store into map_value */
1940 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
1941 BPF_EXIT_INSN(),
1942
1943 /* subprog 1 */
1944 /* if (ctx == 0) return; */
1945 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
1946 /* else bpf_map_lookup() and *(fp - 8) = r0 */
1947 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
1948 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1949 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1950 BPF_LD_MAP_FD(BPF_REG_1, 0),
1951 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1953 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1954 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1955 BPF_EXIT_INSN(),
1956 },
1957 .fixup_map_hash_8b = { 13 },
1958 .result = ACCEPT,
1959 .prog_type = BPF_PROG_TYPE_XDP,
1960},
{
	/* REJECT: one of the two forked paths never zero-initializes fp-8,
	 * yet both converge on a map_lookup that reads fp-8 as the key.
	 * Correct stack liveness marking must prevent the second path from
	 * being pruned against the first; expect an invalid stack read.
	 */
	"calls: stack init to zero and pruning",
	.insns = {
	/* first make allocated_stack 16 byte */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
	/* now fork the execution such that the false branch
	 * of JGT insn will be verified second and it skips zero
	 * init of fp-8 stack slot. If stack liveness marking
	 * is missing live_read marks from call map_lookup
	 * processing then pruning will incorrectly assume
	 * that fp-8 stack slot was unused in the fall-through
	 * branch and will accept the program incorrectly
	 */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	/* both paths converge here with r2 = fp-8 as the lookup key */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 6 },
	.errstr = "invalid indirect read from stack R2 off -8+0 size 8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	/* ACCEPT (privileged): both calls target the same subprog, whose
	 * very first insn loads a byte through its r1 (ctx) argument.
	 * Verifies ctx is readable right at subprog entry on both call
	 * paths.  Unprivileged load is rejected because bpf-to-bpf calls
	 * require privilege.
	 */
	"calls: ctx read at start of subprog",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	/* self-comparison is never true and offset 0 falls through
	 * either way — effectively a no-op branch
	 */
	BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
	/* restore ctx and call the same subprog a second time */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),

	/* subprog 1: byte-load through ctx as the first insn */
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	/* REJECT: the r8 == 0 and r8 == 1 states must not be pruned
	 * against each other across the call into the subprog ("pruner").
	 * On the r8 != 1 path the load through r1 below executes; r1 is
	 * not readable at that point (caller-saved r1-r5 are clobbered by
	 * the call), so the verifier must reach that path and fail with
	 * !read_ok.
	 */
	"calls: cross frame pruning",
	.insns = {
	/* r8 = !!random();
	 * call pruner()
	 * if (r8)
	 *     do something bad;
	 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* skip the bad load only when r8 == 1 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog "pruner": a branch-shaped no-op to create prunable
	 * states in the callee frame
	 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	/* REJECT: like "cross frame pruning" but with two random forks
	 * (r8 and r9) so liveness marks have to propagate across the
	 * frame boundary to keep the bad path alive.  The load through r2
	 * below uses a register the main prog never initialized, so the
	 * verifier must reach that path and fail with !read_ok.
	 */
	"calls: cross frame pruning - liveness propagation",
	.insns = {
	/* r8 = !!random() */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* r9 = !!random() */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* skip the bad load only when r8 == 1; r2 was never written */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog: a branch-shaped no-op to create prunable states */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.errstr = "!read_ok",
	.result = REJECT,
},