/*
 * Source: Linux kernel mirror (for testing)
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * Tags: kernel, os, linux
 */
/* A single bpf-to-bpf call (src_reg=1) with a valid forward target (+2)
 * into a subprog that sets r0 and exits; verifier must ACCEPT.
 */
1{
2 "calls: basic sanity",
3 .insns = {
4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
5 BPF_MOV64_IMM(BPF_REG_0, 1),
6 BPF_EXIT_INSN(),
7 BPF_MOV64_IMM(BPF_REG_0, 2),
8 BPF_EXIT_INSN(),
9 },
10 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11 .result = ACCEPT,
12},
13{
/* bpf-to-bpf calls are root-only: unprivileged load must be REJECTed with
 * the "allowed for root only" message, while a privileged load is ACCEPTed
 * and returns 1.
 * Fix: corrected typo in the test name ("unpriviledged" -> "unprivileged");
 * the expected verifier error string is left byte-identical since it must
 * match kernel output.
 */
14 "calls: not on unprivileged",
15 .insns = {
16 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
17 BPF_MOV64_IMM(BPF_REG_0, 1),
18 BPF_EXIT_INSN(),
19 BPF_MOV64_IMM(BPF_REG_0, 2),
20 BPF_EXIT_INSN(),
21 },
22 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
23 .result_unpriv = REJECT,
24 .result = ACCEPT,
25 .retval = 1,
26},
/* Division by zero in a subprog: BPF_DIV by a zero register must not make
 * the program unverifiable; the (dead after div-by-0 semantics) load result
 * is still ACCEPTed, retval 1.
 */
27{
28 "calls: div by 0 in subprog",
29 .insns = {
30 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
31 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
32 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
33 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
34 offsetof(struct __sk_buff, data_end)),
35 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
36 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
37 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
38 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
39 BPF_MOV64_IMM(BPF_REG_0, 1),
40 BPF_EXIT_INSN(),
41 BPF_MOV32_IMM(BPF_REG_2, 0),
42 BPF_MOV32_IMM(BPF_REG_3, 1),
43 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
44 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
45 offsetof(struct __sk_buff, data)),
46 BPF_EXIT_INSN(),
47 },
48 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
49 .result = ACCEPT,
50 .retval = 1,
51},
/* Subprog may return either a packet pointer or the scalar 42; caller then
 * dereferences r0, so verifier must REJECT with an invalid mem access on
 * the scalar ('inv') case.
 */
52{
53 "calls: multiple ret types in subprog 1",
54 .insns = {
55 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
56 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
58 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
59 offsetof(struct __sk_buff, data_end)),
60 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
61 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
62 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
63 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
64 BPF_MOV64_IMM(BPF_REG_0, 1),
65 BPF_EXIT_INSN(),
66 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
67 offsetof(struct __sk_buff, data)),
68 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
69 BPF_MOV32_IMM(BPF_REG_0, 42),
70 BPF_EXIT_INSN(),
71 },
72 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
73 .result = REJECT,
74 .errstr = "R0 invalid mem access 'inv'",
75},
/* Subprog mixes a packet-data value with a map_lookup_elem result in r0;
 * the +64 arithmetic on the returned pointer must be REJECTed as outside
 * the array range.
 */
76{
77 "calls: multiple ret types in subprog 2",
78 .insns = {
79 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
80 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
81 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
82 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
83 offsetof(struct __sk_buff, data_end)),
84 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
85 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
86 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
87 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
88 BPF_MOV64_IMM(BPF_REG_0, 1),
89 BPF_EXIT_INSN(),
90 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
91 offsetof(struct __sk_buff, data)),
92 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
93 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
94 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
95 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
96 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
97 BPF_LD_MAP_FD(BPF_REG_1, 0),
98 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
99 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
100 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
101 offsetof(struct __sk_buff, data)),
102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
103 BPF_EXIT_INSN(),
104 },
105 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
106 .fixup_map_hash_8b = { 16 },
107 .result = REJECT,
108 .errstr = "R0 min value is outside of the array range",
109},
/* Call with offset 0 targets the call insn's own fallthrough, so the
 * "caller body" ends without exit/jmp: REJECT "last insn is not an exit
 * or jmp".
 */
110{
111 "calls: overlapping caller/callee",
112 .insns = {
113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
114 BPF_MOV64_IMM(BPF_REG_0, 1),
115 BPF_EXIT_INSN(),
116 },
117 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
118 .errstr = "last insn is not an exit or jmp",
119 .result = REJECT,
120},
/* Backward calls landing mid-body make subprog boundaries invalid:
 * REJECT "jump out of range".
 */
121{
122 "calls: wrong recursive calls",
123 .insns = {
124 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
125 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
128 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
129 BPF_MOV64_IMM(BPF_REG_0, 1),
130 BPF_EXIT_INSN(),
131 },
132 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
133 .errstr = "jump out of range",
134 .result = REJECT,
135},
/* src_reg=2 is not a valid BPF_CALL encoding: REJECT "reserved fields". */
136{
137 "calls: wrong src reg",
138 .insns = {
139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
140 BPF_MOV64_IMM(BPF_REG_0, 1),
141 BPF_EXIT_INSN(),
142 },
143 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
144 .errstr = "BPF_CALL uses reserved fields",
145 .result = REJECT,
146},
/* Non-zero off field (-1) in a BPF_CALL insn: REJECT "reserved fields". */
147{
148 "calls: wrong off value",
149 .insns = {
150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
151 BPF_MOV64_IMM(BPF_REG_0, 1),
152 BPF_EXIT_INSN(),
153 BPF_MOV64_IMM(BPF_REG_0, 2),
154 BPF_EXIT_INSN(),
155 },
156 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
157 .errstr = "BPF_CALL uses reserved fields",
158 .result = REJECT,
159},
/* Call with offset -1 targets the call itself: REJECT as a back-edge
 * from insn 0 to 0.
 */
160{
161 "calls: jump back loop",
162 .insns = {
163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
164 BPF_MOV64_IMM(BPF_REG_0, 1),
165 BPF_EXIT_INSN(),
166 },
167 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
168 .errstr = "back-edge from insn 0 to 0",
169 .result = REJECT,
170},
/* Conditional branch over a call jumps past the caller's exit into the
 * callee body: REJECT "jump out of range".
 */
171{
172 "calls: conditional call",
173 .insns = {
174 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
175 offsetof(struct __sk_buff, mark)),
176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
178 BPF_MOV64_IMM(BPF_REG_0, 1),
179 BPF_EXIT_INSN(),
180 BPF_MOV64_IMM(BPF_REG_0, 2),
181 BPF_EXIT_INSN(),
182 },
183 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
184 .errstr = "jump out of range",
185 .result = REJECT,
186},
/* Same pattern but the branch target stays inside the caller and the
 * call target (+4) is a proper subprog: ACCEPT.
 */
187{
188 "calls: conditional call 2",
189 .insns = {
190 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
191 offsetof(struct __sk_buff, mark)),
192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
193 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
194 BPF_MOV64_IMM(BPF_REG_0, 1),
195 BPF_EXIT_INSN(),
196 BPF_MOV64_IMM(BPF_REG_0, 2),
197 BPF_EXIT_INSN(),
198 BPF_MOV64_IMM(BPF_REG_0, 3),
199 BPF_EXIT_INSN(),
200 },
201 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
202 .result = ACCEPT,
203},
/* JA with negative offsets forms a loop back to insn 0:
 * REJECT "back-edge from insn".
 */
204{
205 "calls: conditional call 3",
206 .insns = {
207 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
208 offsetof(struct __sk_buff, mark)),
209 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
210 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
211 BPF_MOV64_IMM(BPF_REG_0, 1),
212 BPF_EXIT_INSN(),
213 BPF_MOV64_IMM(BPF_REG_0, 1),
214 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
215 BPF_MOV64_IMM(BPF_REG_0, 3),
216 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
217 },
218 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
219 .errstr = "back-edge from insn",
220 .result = REJECT,
221},
/* Backward JA (-5) lands on an insn after the call but still within the
 * same function, forming no back-edge: ACCEPT.
 */
222{
223 "calls: conditional call 4",
224 .insns = {
225 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
226 offsetof(struct __sk_buff, mark)),
227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
228 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
229 BPF_MOV64_IMM(BPF_REG_0, 1),
230 BPF_EXIT_INSN(),
231 BPF_MOV64_IMM(BPF_REG_0, 1),
232 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
233 BPF_MOV64_IMM(BPF_REG_0, 3),
234 BPF_EXIT_INSN(),
235 },
236 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
237 .result = ACCEPT,
238},
/* Same as above but JA is -6, reaching back to an earlier insn:
 * REJECT "back-edge from insn".
 */
239{
240 "calls: conditional call 5",
241 .insns = {
242 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
243 offsetof(struct __sk_buff, mark)),
244 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
245 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
246 BPF_MOV64_IMM(BPF_REG_0, 1),
247 BPF_EXIT_INSN(),
248 BPF_MOV64_IMM(BPF_REG_0, 1),
249 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
250 BPF_MOV64_IMM(BPF_REG_0, 3),
251 BPF_EXIT_INSN(),
252 },
253 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
254 .errstr = "back-edge from insn",
255 .result = REJECT,
256},
/* Conditional jump with offset -2 after the call targets the call insn
 * itself: REJECT "back-edge from insn".
 */
257{
258 "calls: conditional call 6",
259 .insns = {
260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
262 BPF_EXIT_INSN(),
263 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
264 offsetof(struct __sk_buff, mark)),
265 BPF_EXIT_INSN(),
266 },
267 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
268 .errstr = "back-edge from insn",
269 .result = REJECT,
270},
/* Caller exits with r0 set by the callee: ACCEPT. */
271{
272 "calls: using r0 returned by callee",
273 .insns = {
274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
275 BPF_EXIT_INSN(),
276 BPF_MOV64_IMM(BPF_REG_0, 2),
277 BPF_EXIT_INSN(),
278 },
279 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
280 .result = ACCEPT,
281},
/* Callee exits without ever writing r0: REJECT "!read_ok". */
282{
283 "calls: using uninit r0 from callee",
284 .insns = {
285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
286 BPF_EXIT_INSN(),
287 BPF_EXIT_INSN(),
288 },
289 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
290 .errstr = "!read_ok",
291 .result = REJECT,
292},
/* Callee reads skb->len via the r1 (ctx) argument: ACCEPT,
 * retval equals the test packet length.
 */
293{
294 "calls: callee is using r1",
295 .insns = {
296 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
297 BPF_EXIT_INSN(),
298 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
299 offsetof(struct __sk_buff, len)),
300 BPF_EXIT_INSN(),
301 },
302 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
303 .result = ACCEPT,
304 .retval = TEST_DATA_LEN,
305},
/* Callee returns the r1 pointer value: ACCEPT for root (retval is the
 * pointer), REJECTed for unprivileged.
 */
306{
307 "calls: callee using args1",
308 .insns = {
309 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
310 BPF_EXIT_INSN(),
311 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
312 BPF_EXIT_INSN(),
313 },
314 .errstr_unpriv = "allowed for root only",
315 .result_unpriv = REJECT,
316 .result = ACCEPT,
317 .retval = POINTER_VALUE,
318},
/* Caller never initialized r2, so callee reading it must be REJECTed. */
319{
320 "calls: callee using wrong args2",
321 .insns = {
322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
323 BPF_EXIT_INSN(),
324 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
325 BPF_EXIT_INSN(),
326 },
327 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
328 .errstr = "R2 !read_ok",
329 .result = REJECT,
330},
/* Both r1 and r2 hold skb->len; callee sums them: ACCEPT for root. */
331{
332 "calls: callee using two args",
333 .insns = {
334 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
335 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
336 offsetof(struct __sk_buff, len)),
337 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
338 offsetof(struct __sk_buff, len)),
339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
340 BPF_EXIT_INSN(),
341 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
342 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
343 BPF_EXIT_INSN(),
344 },
345 .errstr_unpriv = "allowed for root only",
346 .result_unpriv = REJECT,
347 .result = ACCEPT,
348 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
349},
/* Callee calls bpf_xdp_adjust_head, which invalidates the caller's packet
 * pointers; dereferencing r6 afterwards must be REJECTed ('inv').
 */
350{
351 "calls: callee changing pkt pointers",
352 .insns = {
353 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
354 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
355 offsetof(struct xdp_md, data_end)),
356 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
358 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
359 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
360 /* clear_all_pkt_pointers() has to walk all frames
361 * to make sure that pkt pointers in the caller
362 * are cleared when callee is calling a helper that
363 * adjusts packet size
364 */
365 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
366 BPF_MOV32_IMM(BPF_REG_0, 0),
367 BPF_EXIT_INSN(),
368 BPF_MOV64_IMM(BPF_REG_2, 0),
369 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
370 BPF_EXIT_INSN(),
371 },
372 .result = REJECT,
373 .errstr = "R6 invalid mem access 'inv'",
374 .prog_type = BPF_PROG_TYPE_XDP,
375 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
376},
/* Two calls into the same skb->len subprog; results are summed:
 * ACCEPT, retval = 2 * TEST_DATA_LEN.
 */
377{
378 "calls: two calls with args",
379 .insns = {
380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
381 BPF_EXIT_INSN(),
382 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
384 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
385 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
386 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
387 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
388 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
389 BPF_EXIT_INSN(),
390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
391 offsetof(struct __sk_buff, len)),
392 BPF_EXIT_INSN(),
393 },
394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
395 .result = ACCEPT,
396 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
397},
/* Nested subprogs each subtract 64 from a passed stack pointer; the final
 * 8-byte store at fp-based offset is aligned and in range: ACCEPT, 42.
 */
398{
399 "calls: calls with stack arith",
400 .insns = {
401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
404 BPF_EXIT_INSN(),
405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
407 BPF_EXIT_INSN(),
408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
409 BPF_MOV64_IMM(BPF_REG_0, 42),
410 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
411 BPF_EXIT_INSN(),
412 },
413 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
414 .result = ACCEPT,
415 .retval = 42,
416},
/* Same shape with odd offsets (-63, -61): under strict alignment the
 * final store must be REJECTed as misaligned.
 */
417{
418 "calls: calls with misaligned stack access",
419 .insns = {
420 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
422 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
423 BPF_EXIT_INSN(),
424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
426 BPF_EXIT_INSN(),
427 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
428 BPF_MOV64_IMM(BPF_REG_0, 42),
429 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
430 BPF_EXIT_INSN(),
431 },
432 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
433 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
434 .errstr = "misaligned stack access",
435 .result = REJECT,
436},
/* Plain JA jumps (no calls) forming a skip-ahead pattern: ACCEPT, 43. */
437{
438 "calls: calls control flow, jump test",
439 .insns = {
440 BPF_MOV64_IMM(BPF_REG_0, 42),
441 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
442 BPF_MOV64_IMM(BPF_REG_0, 43),
443 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
444 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
445 BPF_EXIT_INSN(),
446 },
447 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
448 .result = ACCEPT,
449 .retval = 43,
450},
/* Replacing the back-jump with a backward call (-3) splits functions so
 * that an earlier JA crosses a boundary: REJECT "jump out of range from
 * insn 1 to 4".
 */
451{
452 "calls: calls control flow, jump test 2",
453 .insns = {
454 BPF_MOV64_IMM(BPF_REG_0, 42),
455 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
456 BPF_MOV64_IMM(BPF_REG_0, 43),
457 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
459 BPF_EXIT_INSN(),
460 },
461 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
462 .errstr = "jump out of range from insn 1 to 4",
463 .result = REJECT,
464},
/* A conditional jump in the last subprog crosses backward into a previous
 * function body: REJECT "jump out of range from insn 11 to 9".
 */
465{
466 "calls: two calls with bad jump",
467 .insns = {
468 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
469 BPF_EXIT_INSN(),
470 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
471 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
472 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
473 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
475 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
476 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
477 BPF_EXIT_INSN(),
478 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
479 offsetof(struct __sk_buff, len)),
480 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
481 BPF_EXIT_INSN(),
482 },
483 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
484 .errstr = "jump out of range from insn 11 to 9",
485 .result = REJECT,
486},
/* Subprog calling itself (offset -1): REJECT "back-edge". */
487{
488 "calls: recursive call. test1",
489 .insns = {
490 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
491 BPF_EXIT_INSN(),
492 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
493 BPF_EXIT_INSN(),
494 },
495 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
496 .errstr = "back-edge",
497 .result = REJECT,
498},
/* Subprog calling back to the main prog (offset -3): REJECT "back-edge". */
499{
500 "calls: recursive call. test2",
501 .insns = {
502 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
503 BPF_EXIT_INSN(),
504 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
505 BPF_EXIT_INSN(),
506 },
507 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
508 .errstr = "back-edge",
509 .result = REJECT,
510},
/* Insns 6-7 are never reached by any call or fallthrough:
 * REJECT "unreachable insn 6".
 */
511{
512 "calls: unreachable code",
513 .insns = {
514 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
515 BPF_EXIT_INSN(),
516 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
517 BPF_EXIT_INSN(),
518 BPF_MOV64_IMM(BPF_REG_0, 0),
519 BPF_EXIT_INSN(),
520 BPF_MOV64_IMM(BPF_REG_0, 0),
521 BPF_EXIT_INSN(),
522 },
523 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
524 .errstr = "unreachable insn 6",
525 .result = REJECT,
526},
/* Call target (-4) is before insn 0: REJECT "invalid destination". */
527{
528 "calls: invalid call",
529 .insns = {
530 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
531 BPF_EXIT_INSN(),
532 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
533 BPF_EXIT_INSN(),
534 },
535 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
536 .errstr = "invalid destination",
537 .result = REJECT,
538},
/* Call target 0x7fffffff is past the end of the program:
 * REJECT "invalid destination".
 */
539{
540 "calls: invalid call 2",
541 .insns = {
542 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
543 BPF_EXIT_INSN(),
544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
545 BPF_EXIT_INSN(),
546 },
547 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
548 .errstr = "invalid destination",
549 .result = REJECT,
550},
/* Conditional jump from the callee back into the caller's body:
 * REJECT "jump out of range".
 */
551{
552 "calls: jumping across function bodies. test1",
553 .insns = {
554 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
555 BPF_MOV64_IMM(BPF_REG_0, 0),
556 BPF_EXIT_INSN(),
557 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
558 BPF_EXIT_INSN(),
559 },
560 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
561 .errstr = "jump out of range",
562 .result = REJECT,
563},
/* Conditional jump from the caller forward into the callee's body:
 * REJECT "jump out of range".
 */
564{
565 "calls: jumping across function bodies. test2",
566 .insns = {
567 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
569 BPF_MOV64_IMM(BPF_REG_0, 0),
570 BPF_EXIT_INSN(),
571 BPF_EXIT_INSN(),
572 },
573 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
574 .errstr = "jump out of range",
575 .result = REJECT,
576},
/* Last subprog ends with a conditional jump instead of exit:
 * REJECT "not an exit".
 */
577{
578 "calls: call without exit",
579 .insns = {
580 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
581 BPF_EXIT_INSN(),
582 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
583 BPF_EXIT_INSN(),
584 BPF_MOV64_IMM(BPF_REG_0, 0),
585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
586 },
587 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
588 .errstr = "not an exit",
589 .result = REJECT,
590},
/* Call target lands on the second half of a 16-byte BPF_LD_IMM64:
 * REJECT ("last insn" subprog-boundary error).
 */
591{
592 "calls: call into middle of ld_imm64",
593 .insns = {
594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
595 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
596 BPF_MOV64_IMM(BPF_REG_0, 0),
597 BPF_EXIT_INSN(),
598 BPF_LD_IMM64(BPF_REG_0, 0),
599 BPF_EXIT_INSN(),
600 },
601 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
602 .errstr = "last insn",
603 .result = REJECT,
604},
/* Two calls target insns that split another subprog improperly:
 * REJECT ("last insn" subprog-boundary error).
 */
605{
606 "calls: call into middle of other call",
607 .insns = {
608 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
609 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
610 BPF_MOV64_IMM(BPF_REG_0, 0),
611 BPF_EXIT_INSN(),
612 BPF_MOV64_IMM(BPF_REG_0, 0),
613 BPF_MOV64_IMM(BPF_REG_0, 0),
614 BPF_EXIT_INSN(),
615 },
616 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
617 .errstr = "last insn",
618 .result = REJECT,
619},
/* Caller uses LD_ABS while the callee calls bpf_skb_vlan_push (which
 * changes ctx data): REJECT — LD_ABS/IND cannot be mixed with such calls.
 */
620{
621 "calls: ld_abs with changing ctx data in callee",
622 .insns = {
623 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
624 BPF_LD_ABS(BPF_B, 0),
625 BPF_LD_ABS(BPF_H, 0),
626 BPF_LD_ABS(BPF_W, 0),
627 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
628 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
629 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
630 BPF_LD_ABS(BPF_B, 0),
631 BPF_LD_ABS(BPF_H, 0),
632 BPF_LD_ABS(BPF_W, 0),
633 BPF_EXIT_INSN(),
634 BPF_MOV64_IMM(BPF_REG_2, 1),
635 BPF_MOV64_IMM(BPF_REG_3, 2),
636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
637 BPF_EXIT_INSN(),
638 },
639 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
640 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
641 .result = REJECT,
642},
/* Second subprog is entered by fallthrough instead of a call and does not
 * end the previous body with exit: REJECT "not an exit".
 */
643{
644 "calls: two calls with bad fallthrough",
645 .insns = {
646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
647 BPF_EXIT_INSN(),
648 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
650 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
651 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
652 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
653 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
654 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
655 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
656 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
657 offsetof(struct __sk_buff, len)),
658 BPF_EXIT_INSN(),
659 },
660 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
661 .errstr = "not an exit",
662 .result = REJECT,
663},
/* Main prog passes a pointer to its own (initialized) stack; innermost
 * subprog reads through it: ACCEPT.
 */
664{
665 "calls: two calls with stack read",
666 .insns = {
667 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
668 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
670 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
671 BPF_EXIT_INSN(),
672 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
673 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
674 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
675 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
676 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
677 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
678 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
679 BPF_EXIT_INSN(),
680 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
681 BPF_EXIT_INSN(),
682 },
683 .prog_type = BPF_PROG_TYPE_XDP,
684 .result = ACCEPT,
685},
/* Subprog 1 writes through a pointer into the main prog's stack frame
 * (fp-16); main prog then reads the slot back: ACCEPT.
 */
686{
687 "calls: two calls with stack write",
688 .insns = {
689 /* main prog */
690 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
692 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
693 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
695 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
696 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
697 BPF_EXIT_INSN(),
698
699 /* subprog 1 */
700 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
701 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
703 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
705 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
706 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
708 /* write into stack frame of main prog */
709 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
710 BPF_EXIT_INSN(),
711
712 /* subprog 2 */
713 /* read from stack frame of main prog */
714 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
715 BPF_EXIT_INSN(),
716 },
717 .prog_type = BPF_PROG_TYPE_XDP,
718 .result = ACCEPT,
719},
/* Both frames use 300 bytes (touched before the call): combined depth
 * exceeds the limit, REJECT "combined stack size".
 */
720{
721 "calls: stack overflow using two frames (pre-call access)",
722 .insns = {
723 /* prog 1 */
724 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
725 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
726 BPF_EXIT_INSN(),
727
728 /* prog 2 */
729 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
730 BPF_MOV64_IMM(BPF_REG_0, 0),
731 BPF_EXIT_INSN(),
732 },
733 .prog_type = BPF_PROG_TYPE_XDP,
734 .errstr = "combined stack size",
735 .result = REJECT,
736},
/* Same overflow but the caller's stack use happens after the call:
 * still REJECT "combined stack size".
 */
737{
738 "calls: stack overflow using two frames (post-call access)",
739 .insns = {
740 /* prog 1 */
741 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
742 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
743 BPF_EXIT_INSN(),
744
745 /* prog 2 */
746 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
747 BPF_MOV64_IMM(BPF_REG_0, 0),
748 BPF_EXIT_INSN(),
749 },
750 .prog_type = BPF_PROG_TYPE_XDP,
751 .errstr = "combined stack size",
752 .result = REJECT,
753},
/* Deepest chain main->B->A = 32+64+256 stays under 512: ACCEPT. */
754{
755 "calls: stack depth check using three frames. test1",
756 .insns = {
757 /* main */
758 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
759 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
760 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
761 BPF_MOV64_IMM(BPF_REG_0, 0),
762 BPF_EXIT_INSN(),
763 /* A */
764 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
765 BPF_EXIT_INSN(),
766 /* B */
767 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
768 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
769 BPF_EXIT_INSN(),
770 },
771 .prog_type = BPF_PROG_TYPE_XDP,
772 /* stack_main=32, stack_A=256, stack_B=64
773 * and max(main+A, main+A+B) < 512
774 */
775 .result = ACCEPT,
776},
/* Same shape, sizes swapped (A=64, B=256); still under 512: ACCEPT. */
777{
778 "calls: stack depth check using three frames. test2",
779 .insns = {
780 /* main */
781 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
782 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
783 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
784 BPF_MOV64_IMM(BPF_REG_0, 0),
785 BPF_EXIT_INSN(),
786 /* A */
787 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
788 BPF_EXIT_INSN(),
789 /* B */
790 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
791 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
792 BPF_EXIT_INSN(),
793 },
794 .prog_type = BPF_PROG_TYPE_XDP,
795 /* stack_main=32, stack_A=64, stack_B=256
796 * and max(main+A, main+A+B) < 512
797 */
798 .result = ACCEPT,
799},
/* main=64, A=224, B=256: the deepest chain exceeds 512,
 * REJECT "combined stack".
 */
800{
801 "calls: stack depth check using three frames. test3",
802 .insns = {
803 /* main */
804 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
805 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
807 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
808 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
809 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
810 BPF_MOV64_IMM(BPF_REG_0, 0),
811 BPF_EXIT_INSN(),
812 /* A */
813 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
814 BPF_EXIT_INSN(),
815 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
816 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
817 /* B */
818 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
819 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
820 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
821 BPF_EXIT_INSN(),
822 },
823 .prog_type = BPF_PROG_TYPE_XDP,
824 /* stack_main=64, stack_A=224, stack_B=256
825 * and max(main+A, main+A+B) > 512
826 */
827 .errstr = "combined stack",
828 .result = REJECT,
829},
/* Conditional allocation paths (see the pseudo-C comment below): the
 * worst-case main->A->B chain with two 300-byte frames must be
 * REJECTed with "combined stack".
 */
830{
831 "calls: stack depth check using three frames. test4",
832 /* void main(void) {
833 * func1(0);
834 * func1(1);
835 * func2(1);
836 * }
837 * void func1(int alloc_or_recurse) {
838 * if (alloc_or_recurse) {
839 * frame_pointer[-300] = 1;
840 * } else {
841 * func2(alloc_or_recurse);
842 * }
843 * }
844 * void func2(int alloc_or_recurse) {
845 * if (alloc_or_recurse) {
846 * frame_pointer[-300] = 1;
847 * }
848 * }
849 */
850 .insns = {
851 /* main */
852 BPF_MOV64_IMM(BPF_REG_1, 0),
853 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
854 BPF_MOV64_IMM(BPF_REG_1, 1),
855 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
856 BPF_MOV64_IMM(BPF_REG_1, 1),
857 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
858 BPF_MOV64_IMM(BPF_REG_0, 0),
859 BPF_EXIT_INSN(),
860 /* A */
861 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
862 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
863 BPF_EXIT_INSN(),
864 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
865 BPF_EXIT_INSN(),
866 /* B */
867 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
868 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
869 BPF_EXIT_INSN(),
870 },
871 .prog_type = BPF_PROG_TYPE_XDP,
872 .result = REJECT,
873 .errstr = "combined stack",
874},
/* Call chain main->A->...->H is deeper than the allowed frame count:
 * REJECT "call stack".
 */
875{
876 "calls: stack depth check using three frames. test5",
877 .insns = {
878 /* main */
879 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
880 BPF_EXIT_INSN(),
881 /* A */
882 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
883 BPF_EXIT_INSN(),
884 /* B */
885 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
886 BPF_EXIT_INSN(),
887 /* C */
888 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
889 BPF_EXIT_INSN(),
890 /* D */
891 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
892 BPF_EXIT_INSN(),
893 /* E */
894 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
895 BPF_EXIT_INSN(),
896 /* F */
897 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
898 BPF_EXIT_INSN(),
899 /* G */
900 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
901 BPF_EXIT_INSN(),
902 /* H */
903 BPF_MOV64_IMM(BPF_REG_0, 0),
904 BPF_EXIT_INSN(),
905 },
906 .prog_type = BPF_PROG_TYPE_XDP,
907 .errstr = "call stack",
908 .result = REJECT,
909},
/* The deep chain sits behind a branch on r1==0; depth must still be
 * checked even on that path: REJECT "call stack".
 */
910{
911 "calls: stack depth check in dead code",
912 .insns = {
913 /* main */
914 BPF_MOV64_IMM(BPF_REG_1, 0),
915 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
916 BPF_EXIT_INSN(),
917 /* A */
918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
919 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
920 BPF_MOV64_IMM(BPF_REG_0, 0),
921 BPF_EXIT_INSN(),
922 /* B */
923 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
924 BPF_EXIT_INSN(),
925 /* C */
926 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
927 BPF_EXIT_INSN(),
928 /* D */
929 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
930 BPF_EXIT_INSN(),
931 /* E */
932 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
933 BPF_EXIT_INSN(),
934 /* F */
935 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
936 BPF_EXIT_INSN(),
937 /* G */
938 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
939 BPF_EXIT_INSN(),
940 /* H */
941 BPF_MOV64_IMM(BPF_REG_0, 0),
942 BPF_EXIT_INSN(),
943 },
944 .prog_type = BPF_PROG_TYPE_XDP,
945 .errstr = "call stack",
946 .result = REJECT,
947},
/* Callee tries to spill a pointer (r1 into *r1) into the caller's stack
 * slot: REJECT "cannot spill".
 */
948{
949 "calls: spill into caller stack frame",
950 .insns = {
951 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
952 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
955 BPF_EXIT_INSN(),
956 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
957 BPF_MOV64_IMM(BPF_REG_0, 0),
958 BPF_EXIT_INSN(),
959 },
960 .prog_type = BPF_PROG_TYPE_XDP,
961 .errstr = "cannot spill",
962 .result = REJECT,
963},
/* Callee stores the scalar 42 through a caller-stack pointer; caller
 * reads it back: ACCEPT, retval 42.
 */
964{
965 "calls: write into caller stack frame",
966 .insns = {
967 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
969 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
970 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
971 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
972 BPF_EXIT_INSN(),
973 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
974 BPF_MOV64_IMM(BPF_REG_0, 0),
975 BPF_EXIT_INSN(),
976 },
977 .prog_type = BPF_PROG_TYPE_XDP,
978 .result = ACCEPT,
979 .retval = 42,
980},
/* Callee returns a pointer into its own (dead) stack frame and the caller
 * writes through it: REJECT "cannot return stack pointer".
 */
981{
982 "calls: write into callee stack frame",
983 .insns = {
984 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
985 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
986 BPF_EXIT_INSN(),
987 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
989 BPF_EXIT_INSN(),
990 },
991 .prog_type = BPF_PROG_TYPE_XDP,
992 .errstr = "cannot return stack pointer",
993 .result = REJECT,
994},
/* Nested subprogs write zeros into main's stack through passed pointers;
 * inner subprog exits without setting r0 ("void return"): ACCEPT,
 * because the caller does not consume the callee's r0.
 */
995{
996 "calls: two calls with stack write and void return",
997 .insns = {
998 /* main prog */
999 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1000 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1004 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1005 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1006 BPF_EXIT_INSN(),
1007
1008 /* subprog 1 */
1009 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1010 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1011 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1012 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1014 BPF_EXIT_INSN(),
1015
1016 /* subprog 2 */
1017 /* write into stack frame of main prog */
1018 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
1019 BPF_EXIT_INSN(), /* void return */
1020 },
1021 .prog_type = BPF_PROG_TYPE_XDP,
1022 .result = ACCEPT,
1023},
/* Main exits with r0 possibly uninitialized depending on which callee path
 * produced it: REJECT "R0 !read_ok" (and root-only for unprivileged).
 */
1024{
1025 "calls: ambiguous return value",
1026 .insns = {
1027 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1028 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1029 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1030 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1032 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1033 BPF_EXIT_INSN(),
1034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1035 BPF_MOV64_IMM(BPF_REG_0, 0),
1036 BPF_EXIT_INSN(),
1037 },
1038 .errstr_unpriv = "allowed for root only",
1039 .result_unpriv = REJECT,
1040 .errstr = "R0 !read_ok",
1041 .result = REJECT,
1042},
/* Subprog 2 stores map_lookup_elem results into two main-prog stack slots
 * (fp-8 and fp-16); main NULL-checks each before writing: ACCEPT.
 */
1043{
1044 "calls: two calls that return map_value",
1045 .insns = {
1046 /* main prog */
1047 /* pass fp-16, fp-8 into a function */
1048 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1050 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1052 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
1053
1054 /* fetch map_value_ptr from the stack of this function */
1055 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1057 /* write into map value */
1058 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1059 /* fetch secound map_value_ptr from the stack */
1060 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1061 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1062 /* write into map value */
1063 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1064 BPF_MOV64_IMM(BPF_REG_0, 0),
1065 BPF_EXIT_INSN(),
1066
1067 /* subprog 1 */
1068 /* call 3rd function twice */
1069 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1070 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1071 /* first time with fp-8 */
1072 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1073 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1074 /* second time with fp-16 */
1075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1076 BPF_EXIT_INSN(),
1077
1078 /* subprog 2 */
1079 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1080 /* lookup from map */
1081 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1084 BPF_LD_MAP_FD(BPF_REG_1, 0),
1085 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1086 /* write map_value_ptr into stack frame of main prog */
1087 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1088 BPF_MOV64_IMM(BPF_REG_0, 0),
1089 BPF_EXIT_INSN(), /* return 0 */
1090 },
1091 .prog_type = BPF_PROG_TYPE_XDP,
1092 .fixup_map_hash_8b = { 23 },
1093 .result = ACCEPT,
1094},
{
	"calls: two calls that return map_value with bool condition",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	/* only dereference the spilled pointer if subprog 2 returned 1 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	"calls: two calls that return map_value with incorrect bool check",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* deliberately wrong check: tests r0 != 0 instead of r0 != 1,
	 * so fp-16 is read even on the path where subprog 2 returned 0
	 * and never spilled a pointer there
	 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = REJECT,
	.errstr = "invalid read from stack off -16+0 size 8",
},
{
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* 8-byte write at off=2 overruns the 8-byte map value -> REJECT */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Same layout as test1, but both writes stay at off=0 within the
	 * 8-byte map value, so the program is accepted.
	 */
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	/* Same flow as test1 but built from plain jumps instead of
	 * bpf-to-bpf calls; the off=2 size-8 write must still be caught.
	 */
	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0), // 26
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
	BPF_JMP_IMM(BPF_JA, 0, 0, -30),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* 8-byte write at off=2 overruns the 8-byte map value -> REJECT */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Spills the raw map_value_or_null result into the caller's stack,
	 * tracks validity in a separate flag, and only dereferences when
	 * the flag says the lookup succeeded -> accepted.
	 */
	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* deliberately inverted flag check: if arg4 == 0 do *arg3 = 0,
	 * i.e. dereference on the path where the lookup FAILED and the
	 * slot holds NULL -> the loaded value is a scalar, not a pointer
	 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'inv'",
},
{
	/* Subprog spills an unchecked pkt_ptr into the caller's stack slot,
	 * then range-checks it before reading it back -> accepted.
	 */
	"calls: pkt_ptr spill into caller stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Caller reloads the spilled pkt_ptr after the call and writes
	 * through it without its own range check -> must be rejected.
	 */
	"calls: pkt_ptr spill into caller stack 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* Marking is still kept, but not in all cases safe. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Subprog returns 1 only on the range-checked path; caller gates
	 * its use of the spilled pkt_ptr on that return -> accepted.
	 */
	"calls: pkt_ptr spill into caller stack 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Marking is still kept and safe here. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Like test 3, but the subprog writes through its own r2 instead of
	 * reloading the spill; caller's gated reload is still safe -> accepted.
	 */
	"calls: pkt_ptr spill into caller stack 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Check marking propagated. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Caller pre-stores ctx in the slot; the subprog conditionally
	 * overwrites it with a pkt_ptr, so the same spill insn sees two
	 * different pointer types -> rejected.
	 */
	"calls: pkt_ptr spill into caller stack 5",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "same insn cannot be used with different",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Caller pre-spills data_end; on the subprog path that skips the
	 * spill, the caller's reload dereferences a non-pkt value -> rejected.
	 */
	"calls: pkt_ptr spill into caller stack 6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Caller pre-spills the scalar 0; on the subprog path that skips
	 * the pkt_ptr spill, the caller dereferences a scalar -> rejected.
	 */
	"calls: pkt_ptr spill into caller stack 7",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Caller pre-spills a range-checked pkt_ptr; subprog conditionally
	 * re-spills a checked one, so every path leaves a valid pkt_ptr in
	 * the slot -> accepted.
	 */
	"calls: pkt_ptr spill into caller stack 8",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Like test 8, but the subprog spills BEFORE its range check, so the
	 * caller can read back an unchecked pkt_ptr -> rejected.
	 */
	"calls: pkt_ptr spill into caller stack 9",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* fp-8 is either the pre-initialized zero (subprog returned early)
	 * or a map_value_or_null written by the subprog; the caller's
	 * NULL check covers both -> accepted.
	 */
	"calls: caller stack init to zero or map_value_or_null",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* fetch map_value_or_null or const_zero from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* store into map_value */
	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* if (ctx == 0) return; */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
	/* else bpf_map_lookup() and *(fp - 8) = r0 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 13 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"calls: stack init to zero and pruning",
	.insns = {
	/* first make allocated_stack 16 byte */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
	/* now fork the execution such that the false branch
	 * of JGT insn will be verified second and it skips zero
	 * init of fp-8 stack slot. If stack liveness marking
	 * is missing live_read marks from call map_lookup
	 * processing then pruning will incorrectly assume
	 * that fp-8 stack slot was unused in the fall-through
	 * branch and will accept the program incorrectly
	 */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 6 },
	.errstr = "invalid indirect read from stack off -8+0 size 8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
1936{
	/* Subprog dereferences ctx in its very first insn; it is reached
	 * from two different call sites, once with r1 = ctx and once with
	 * r1 = the previous call's return value.
	 */
1937 "calls: ctx read at start of subprog",
1938 .insns = {
	/* save ctx */
1939 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* call subprog (insn 7) with r1 = ctx */
1940 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	/* r0 > r0 is never true: branch offset 0 falls through either way */
1941 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
	/* restore ctx and call the same subprog (insn 7) again */
1942 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1944 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1945 BPF_EXIT_INSN(),
	/* subprog: first insn loads a byte through r1 */
1946 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
1947 BPF_MOV64_IMM(BPF_REG_0, 0),
1948 BPF_EXIT_INSN(),
1949 },
1950 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1951 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
1952 .result_unpriv = REJECT,
1953 .result = ACCEPT,
1954},
1955{
	/* Pruning must not merge states across frames when r8 differs:
	 * the r8 == 1 path reaching the load below must still be verified
	 * and rejected, since r1 is clobbered by the bpf-to-bpf call.
	 */
1956 "calls: cross frame pruning",
1957 .insns = {
1958 /* r8 = !!random();
1959 * call pruner()
1960 * if (r8)
1961 * do something bad;
1962 */
1963 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1964 BPF_MOV64_IMM(BPF_REG_8, 0),
1965 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1966 BPF_MOV64_IMM(BPF_REG_8, 1),
1967 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	/* call subprog (insn 9) */
1968 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1969 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	/* bad: r1 is caller-saved and not readable after the call */
1970 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
1971 BPF_MOV64_IMM(BPF_REG_0, 0),
1972 BPF_EXIT_INSN(),
	/* subprog: no-op branch (offset 0), then return */
1973 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1974 BPF_EXIT_INSN(),
1975 },
1976 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1977 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
1978 .errstr = "!read_ok",
1979 .result = REJECT,
1980},
1981{
	/* Like "cross frame pruning" above, but with two random scalars
	 * (r8, r9): liveness marks must propagate across the call so
	 * pruning cannot skip the path that reads never-written r2.
	 */
1982 "calls: cross frame pruning - liveness propagation",
1983 .insns = {
	/* r8 = !!random() */
1984 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1985 BPF_MOV64_IMM(BPF_REG_8, 0),
1986 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1987 BPF_MOV64_IMM(BPF_REG_8, 1),
	/* r9 = !!random() */
1988 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1989 BPF_MOV64_IMM(BPF_REG_9, 0),
1990 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1991 BPF_MOV64_IMM(BPF_REG_9, 1),
1992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	/* call subprog (insn 13) */
1993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1994 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	/* bad: r2 was never written in this frame and the call clobbers
	 * caller-saved regs, so this load must be rejected
	 */
1995 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
1996 BPF_MOV64_IMM(BPF_REG_0, 0),
1997 BPF_EXIT_INSN(),
	/* subprog: no-op branch (offset 0), then return */
1998 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1999 BPF_EXIT_INSN(),
2000 },
2001 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2002 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
2003 .errstr = "!read_ok",
2004 .result = REJECT,
2005},