/*
 * Source: Linux kernel mirror (for testing)
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * Tags: kernel, os, linux
 */
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2023 SUSE LLC */
3#include <linux/bpf.h>
4#include <bpf/bpf_helpers.h>
5#include "../../../include/linux/filter.h"
6#include "bpf_misc.h"
7
8SEC("?raw_tp")
9__success __log_level(2)
10__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
11__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
12__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
13__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
14__naked int bpf_neg(void)
15{
16 asm volatile (
17 "r2 = 8;"
18 "r2 = -r2;"
19 "if r2 != -8 goto 1f;"
20 "r1 = r10;"
21 "r1 += r2;"
22 "1:"
23 "r0 = 0;"
24 "exit;"
25 ::: __clobber_all);
26}
27
28SEC("?raw_tp")
29__success __log_level(2)
30__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
31__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
32__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
33__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
34__naked int bpf_end_to_le(void)
35{
36 asm volatile (
37 "r2 = 0;"
38 "r2 = le16 r2;"
39 "if r2 != 0 goto 1f;"
40 "r1 = r10;"
41 "r1 += r2;"
42 "1:"
43 "r0 = 0;"
44 "exit;"
45 ::: __clobber_all);
46}
47
48
49SEC("?raw_tp")
50__success __log_level(2)
51__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
52__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
53__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
54__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
55__naked int bpf_end_to_be(void)
56{
57 asm volatile (
58 "r2 = 0;"
59 "r2 = be16 r2;"
60 "if r2 != 0 goto 1f;"
61 "r1 = r10;"
62 "r1 += r2;"
63 "1:"
64 "r0 = 0;"
65 "exit;"
66 ::: __clobber_all);
67}
68
/*
 * The following tests use BPF v4 ISA instructions (bswap and, nested
 * below, load-acquire/store-release), which require a JIT that supports
 * them on the listed targets and clang >= 18 to assemble.
 */
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
	__clang_major__ >= 18

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
/*
 * Backtrack precision through the unconditional bswap16 (0xd7)
 * instruction added in the v4 instruction set.
 */
__naked int bpf_end_bswap(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = bswap16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"		/* triggers mark_precise on r2 */
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

#ifdef CAN_USE_LOAD_ACQ_STORE_REL

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (db) r2 = load_acquire((u64 *)(r10 -8))")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/*
 * Backtrack precision through a BPF_LOAD_ACQ atomic (0xdb): tracking
 * must transfer from register r2 to stack slot -8 and from there back
 * to r1, per the __msg() patterns above. The instruction is emitted as
 * a raw .8byte since it has no asm mnemonic here.
 */
__naked int bpf_load_acquire(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		".8byte %[load_acquire_insn];" /* r2 = load_acquire((u64 *)(r10 - 8)); */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(load_acquire_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10")
__msg("mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/*
 * Mirror of bpf_load_acquire for BPF_STORE_REL: precision tracking must
 * follow r1 into stack slot -8 through the store-release and back out.
 */
__naked int bpf_store_release(void)
{
	asm volatile (
		"r1 = 8;"
		".8byte %[store_release_insn];" /* store_release((u64 *)(r10 - 8), r1); */
		"r1 = *(u64 *)(r10 - 8);"
		"r2 = r10;"
		"r2 += r1;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(store_release_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
		: __clobber_all);
}

#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
#endif /* v4 instruction */

143SEC("?raw_tp")
144__success __log_level(2)
145/*
146 * Without the bug fix there will be no history between "last_idx 3 first_idx 3"
147 * and "parent state regs=" lines. "R0=6" parts are here to help anchor
148 * expected log messages to the one specific mark_chain_precision operation.
149 *
150 * This is quite fragile: if verifier checkpointing heuristic changes, this
151 * might need adjusting.
152 */
153__msg("2: (07) r0 += 1 ; R0=6")
154__msg("3: (35) if r0 >= 0xa goto pc+1")
155__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
156__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
157__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
158__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
159__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
160__msg("mark_precise: frame0: parent state regs= stack=: R0=P4")
161__msg("3: R0=6")
162__naked int state_loop_first_last_equal(void)
163{
164 asm volatile (
165 "r0 = 0;"
166 "l0_%=:"
167 "r0 += 1;"
168 "r0 += 1;"
169 /* every few iterations we'll have a checkpoint here with
170 * first_idx == last_idx, potentially confusing precision
171 * backtracking logic
172 */
173 "if r0 >= 10 goto l1_%=;" /* checkpoint + mark_precise */
174 "goto l0_%=;"
175 "l1_%=:"
176 "exit;"
177 ::: __clobber_common
178 );
179}
180
181__used __naked static void __bpf_cond_op_r10(void)
182{
183 asm volatile (
184 "r2 = 2314885393468386424 ll;"
185 "goto +0;"
186 "if r2 <= r10 goto +3;"
187 "if r1 >= -1835016 goto +0;"
188 "if r2 <= 8 goto +0;"
189 "if r3 <= 0 goto +0;"
190 "exit;"
191 ::: __clobber_all);
192}
193
194SEC("?raw_tp")
195__success __log_level(2)
196__msg("8: (bd) if r2 <= r10 goto pc+3")
197__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0")
198__msg("10: (b5) if r2 <= 0x8 goto pc+0")
199__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1")
200__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0")
201__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3")
202__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0")
203__naked void bpf_cond_op_r10(void)
204{
205 asm volatile (
206 "r3 = 0 ll;"
207 "call __bpf_cond_op_r10;"
208 "r0 = 0;"
209 "exit;"
210 ::: __clobber_all);
211}
212
213SEC("?raw_tp")
214__success __log_level(2)
215__msg("3: (bf) r3 = r10")
216__msg("4: (bd) if r3 <= r2 goto pc+1")
217__msg("5: (b5) if r2 <= 0x8 goto pc+2")
218__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
219__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1")
220__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
221__naked void bpf_cond_op_not_r10(void)
222{
223 asm volatile (
224 "r0 = 0;"
225 "r2 = 2314885393468386424 ll;"
226 "r3 = r10;"
227 "if r3 <= r2 goto +1;"
228 "if r2 <= 8 goto +2;"
229 "r0 = 2 ll;"
230 "exit;"
231 ::: __clobber_all);
232}
233
234SEC("lsm.s/socket_connect")
235__success __log_level(2)
236__msg("0: (b7) r0 = 1 ; R0=1")
237__msg("1: (84) w0 = -w0 ; R0=0xffffffff")
238__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
239__msg("mark_precise: frame0: regs=r0 stack= before 1: (84) w0 = -w0")
240__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
241__naked int bpf_neg_2(void)
242{
243 /*
244 * lsm.s/socket_connect requires a return value within [-4095, 0].
245 * Returning -1 is allowed
246 */
247 asm volatile (
248 "r0 = 1;"
249 "w0 = -w0;"
250 "exit;"
251 ::: __clobber_all);
252}
253
254SEC("lsm.s/socket_connect")
255__failure __msg("At program exit the register R0 has")
256__naked int bpf_neg_3(void)
257{
258 /*
259 * lsm.s/socket_connect requires a return value within [-4095, 0].
260 * Returning -10000 is not allowed.
261 */
262 asm volatile (
263 "r0 = 10000;"
264 "w0 = -w0;"
265 "exit;"
266 ::: __clobber_all);
267}
268
269SEC("lsm.s/socket_connect")
270__success __log_level(2)
271__msg("0: (b7) r0 = 1 ; R0=1")
272__msg("1: (87) r0 = -r0 ; R0=-1")
273__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
274__msg("mark_precise: frame0: regs=r0 stack= before 1: (87) r0 = -r0")
275__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
276__naked int bpf_neg_4(void)
277{
278 /*
279 * lsm.s/socket_connect requires a return value within [-4095, 0].
280 * Returning -1 is allowed
281 */
282 asm volatile (
283 "r0 = 1;"
284 "r0 = -r0;"
285 "exit;"
286 ::: __clobber_all);
287}
288
289SEC("lsm.s/socket_connect")
290__failure __msg("At program exit the register R0 has")
291__naked int bpf_neg_5(void)
292{
293 /*
294 * lsm.s/socket_connect requires a return value within [-4095, 0].
295 * Returning -10000 is not allowed.
296 */
297 asm volatile (
298 "r0 = 10000;"
299 "r0 = -r0;"
300 "exit;"
301 ::: __clobber_all);
302}
303
304char _license[] SEC("license") = "GPL";