
selftests/bpf: Add selftests for load-acquire and store-release instructions

Add several ./test_progs tests:

- arena_atomics/load_acquire
- arena_atomics/store_release
- verifier_load_acquire/*
- verifier_store_release/*
- verifier_precision/bpf_load_acquire
- verifier_precision/bpf_store_release

The last two tests are added to check if backtrack_insn() handles the
new instructions correctly.
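
For instance, verifier_precision/bpf_store_release expects the
backtracking log below (taken from the verifier_precision.c diff
further down; the (db) instruction is the store-release):

mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10
mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)
mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)
mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8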

Additionally, the last test also makes sure that the verifier
"remembers" the value (in src_reg) we store-release into memory, e.g. a
stack slot. For example, consider the following test program:

#0: r1 = 8;
/* store_release((u64 *)(r10 - 8), r1); */
#1: .8byte %[store_release];
#2: r1 = *(u64 *)(r10 - 8);
#3: r2 = r10;
#4: r2 += r1;
#5: r0 = 0;
#6: exit;

At #1, if the verifier doesn't remember that we wrote 8 to the stack,
then later at #4 we would be adding an unbounded scalar value to the
stack pointer, which would cause the program to be rejected:

VERIFIER LOG:
=============
...
math between fp pointer and register with unbounded min value is not allowed

For easier CI integration, instead of using built-ins like
__atomic_{load,store}_n(), which depend on the new
__BPF_FEATURE_LOAD_ACQ_STORE_REL pre-defined macro, manually craft
load-acquire/store-release instructions using __imm_insn(), as suggested
by Eduard.
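
For comparison, here is a minimal sketch (hypothetical, not part of this
patch) of what the built-in approach would look like:

/* Hypothetical sketch: with a new-enough Clang these builtins compile
 * directly to BPF load-acquire/store-release instructions, but the
 * guard macro below is only pre-defined by newer compilers, which is
 * what complicates CI.
 */
#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
#include <linux/types.h>

__u64 src, dst;

void builtin_example(void)
{
	__u64 val = __atomic_load_n(&src, __ATOMIC_ACQUIRE); /* load-acquire */

	__atomic_store_n(&dst, val, __ATOMIC_RELEASE);       /* store-release */
}
#endif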

All new tests depend on:

(1) Clang major version >= 18, and
(2) ENABLE_ATOMICS_TESTS is defined (currently implies -mcpu=v3 or
v4), and
(3) JIT supports load-acquire/store-release (currently arm64 and
x86-64)
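
In verifier_load_acquire.c and verifier_store_release.c below, these
three dependencies are expressed as a single compile-time guard; when it
is not satisfied, only a trivial dummy test is built:

#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \
	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
/* real load-acquire/store-release tests */
#else
/* dummy test only, so the skeleton still builds and loads */
#endif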

In .../progs/arena_atomics.c:

/* 8-byte-aligned */
__u8 __arena_global load_acquire8_value = 0x12;
/* 1-byte hole */
__u16 __arena_global load_acquire16_value = 0x1234;

That 1-byte hole in the .addr_space.1 ELF section caused clang-17 to
crash:

fatal error: error in backend: unable to write nop sequence of 1 bytes

To work around such llvm-17 CI job failures, conditionally define
__arena_global variables as 64-bit if __clang_major__ < 18, to make sure
.addr_space.1 has no holes. Ideally we should avoid compiling this file
using clang-17 at all (arena tests depend on
__BPF_FEATURE_ADDR_SPACE_CAST, and are skipped for llvm-17 anyway), but
that is a separate topic.
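
Concretely, the workaround (shown in full in the arena_atomics.c diff
below) conditionally widens the variables:

#if __clang_major__ >= 18
__u8 __arena_global load_acquire8_value = 0x12;
__u16 __arena_global load_acquire16_value = 0x1234;
#else
/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work
 * around this issue by defining the below variables as 64-bit.
 */
__u64 __arena_global load_acquire8_value;
__u64 __arena_global load_acquire16_value;
#endif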

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Peilin Ye <yepeilin@google.com>
Link: https://lore.kernel.org/r/1b46c6feaf0f1b6984d9ec80e500cc7383e9da1a.1741049567.git.yepeilin@google.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Peilin Ye, committed by Alexei Starovoitov
ff3afe5d 5341c9a4

+698 -3 total, across the following files:
tools/testing/selftests/bpf/prog_tests/arena_atomics.c (+65 -1)
···
 	ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails");
 }
 
+static void test_load_acquire(struct arena_atomics *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd;
+
+	if (skel->data->skip_lacq_srel_tests) {
+		printf("%s:SKIP: ENABLE_ATOMICS_TESTS not defined, Clang doesn't support addr_space_cast, and/or JIT doesn't support load-acquire\n",
+		       __func__);
+		test__skip();
+		return;
+	}
+
+	/* No need to attach it, just run it directly */
+	prog_fd = bpf_program__fd(skel->progs.load_acquire);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return;
+
+	ASSERT_EQ(skel->arena->load_acquire8_result, 0x12,
+		  "load_acquire8_result");
+	ASSERT_EQ(skel->arena->load_acquire16_result, 0x1234,
+		  "load_acquire16_result");
+	ASSERT_EQ(skel->arena->load_acquire32_result, 0x12345678,
+		  "load_acquire32_result");
+	ASSERT_EQ(skel->arena->load_acquire64_result, 0x1234567890abcdef,
+		  "load_acquire64_result");
+}
+
+static void test_store_release(struct arena_atomics *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd;
+
+	if (skel->data->skip_lacq_srel_tests) {
+		printf("%s:SKIP: ENABLE_ATOMICS_TESTS not defined, Clang doesn't support addr_space_cast, and/or JIT doesn't support store-release\n",
+		       __func__);
+		test__skip();
+		return;
+	}
+
+	/* No need to attach it, just run it directly */
+	prog_fd = bpf_program__fd(skel->progs.store_release);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return;
+
+	ASSERT_EQ(skel->arena->store_release8_result, 0x12,
+		  "store_release8_result");
+	ASSERT_EQ(skel->arena->store_release16_result, 0x1234,
+		  "store_release16_result");
+	ASSERT_EQ(skel->arena->store_release32_result, 0x12345678,
+		  "store_release32_result");
+	ASSERT_EQ(skel->arena->store_release64_result, 0x1234567890abcdef,
+		  "store_release64_result");
+}
+
 void test_arena_atomics(void)
 {
 	struct arena_atomics *skel;
···
 	if (!ASSERT_OK_PTR(skel, "arena atomics skeleton open"))
 		return;
 
-	if (skel->data->skip_tests) {
+	if (skel->data->skip_all_tests) {
 		printf("%s:SKIP:no ENABLE_ATOMICS_TESTS or no addr_space_cast support in clang",
 		       __func__);
 		test__skip();
···
 		test_xchg(skel);
 	if (test__start_subtest("uaf"))
 		test_uaf(skel);
+	if (test__start_subtest("load_acquire"))
+		test_load_acquire(skel);
+	if (test__start_subtest("store_release"))
+		test_store_release(skel);
 
 cleanup:
 	arena_atomics__destroy(skel);
tools/testing/selftests/bpf/prog_tests/verifier.c (+4)
···
 #include "verifier_ldsx.skel.h"
 #include "verifier_leak_ptr.skel.h"
 #include "verifier_linked_scalars.skel.h"
+#include "verifier_load_acquire.skel.h"
 #include "verifier_loops1.skel.h"
 #include "verifier_lwt.skel.h"
 #include "verifier_map_in_map.skel.h"
···
 #include "verifier_spill_fill.skel.h"
 #include "verifier_spin_lock.skel.h"
 #include "verifier_stack_ptr.skel.h"
+#include "verifier_store_release.skel.h"
 #include "verifier_subprog_precision.skel.h"
 #include "verifier_subreg.skel.h"
 #include "verifier_tailcall_jit.skel.h"
···
 void test_verifier_iterating_callbacks(void) { RUN(verifier_iterating_callbacks); }
 void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); }
 void test_verifier_jit_convergence(void) { RUN(verifier_jit_convergence); }
+void test_verifier_load_acquire(void) { RUN(verifier_load_acquire); }
 void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
 void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
 void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
···
 void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
 void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
 void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
+void test_verifier_store_release(void) { RUN(verifier_store_release); }
 void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
 void test_verifier_subreg(void) { RUN(verifier_subreg); }
 void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); }
tools/testing/selftests/bpf/progs/arena_atomics.c (+119 -2)
···
 #include <stdbool.h>
 #include <stdatomic.h>
 #include "bpf_arena_common.h"
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
 
 struct {
 	__uint(type, BPF_MAP_TYPE_ARENA);
···
 } arena SEC(".maps");
 
 #if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
-bool skip_tests __attribute((__section__(".data"))) = false;
+bool skip_all_tests __attribute((__section__(".data"))) = false;
 #else
-bool skip_tests = true;
+bool skip_all_tests = true;
+#endif
+
+#if defined(ENABLE_ATOMICS_TESTS) && \
+	defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
+	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_lacq_srel_tests = true;
 #endif
 
 __u32 pid = 0;
···
 	uaf_recovery_fails -= 1;
 #endif
 
+	return 0;
+}
+
+#if __clang_major__ >= 18
+__u8 __arena_global load_acquire8_value = 0x12;
+__u16 __arena_global load_acquire16_value = 0x1234;
+__u32 __arena_global load_acquire32_value = 0x12345678;
+__u64 __arena_global load_acquire64_value = 0x1234567890abcdef;
+
+__u8 __arena_global load_acquire8_result = 0;
+__u16 __arena_global load_acquire16_result = 0;
+__u32 __arena_global load_acquire32_result = 0;
+__u64 __arena_global load_acquire64_result = 0;
+#else
+/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
+ * this issue by defining the below variables as 64-bit.
+ */
+__u64 __arena_global load_acquire8_value;
+__u64 __arena_global load_acquire16_value;
+__u64 __arena_global load_acquire32_value;
+__u64 __arena_global load_acquire64_value;
+
+__u64 __arena_global load_acquire8_result;
+__u64 __arena_global load_acquire16_result;
+__u64 __arena_global load_acquire32_result;
+__u64 __arena_global load_acquire64_result;
+#endif
+
+SEC("raw_tp/sys_enter")
+int load_acquire(const void *ctx)
+{
+#if defined(ENABLE_ATOMICS_TESTS) && \
+	defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
+	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+
+#define LOAD_ACQUIRE_ARENA(SIZEOP, SIZE, SRC, DST)		\
+	{ asm volatile (					\
+	"r1 = %[" #SRC "] ll;"					\
+	"r1 = addr_space_cast(r1, 0x0, 0x1);"			\
+	".8byte %[load_acquire_insn];"				\
+	"r3 = %[" #DST "] ll;"					\
+	"r3 = addr_space_cast(r3, 0x0, 0x1);"			\
+	"*(" #SIZE " *)(r3 + 0) = r2;"				\
+	:							\
+	: __imm_addr(SRC),					\
+	  __imm_insn(load_acquire_insn,				\
+		     BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_LOAD_ACQ,	\
+				   BPF_REG_2, BPF_REG_1, 0)),	\
+	  __imm_addr(DST)					\
+	: __clobber_all); }					\
+
+	LOAD_ACQUIRE_ARENA(B, u8, load_acquire8_value, load_acquire8_result)
+	LOAD_ACQUIRE_ARENA(H, u16, load_acquire16_value,
+			   load_acquire16_result)
+	LOAD_ACQUIRE_ARENA(W, u32, load_acquire32_value,
+			   load_acquire32_result)
+	LOAD_ACQUIRE_ARENA(DW, u64, load_acquire64_value,
+			   load_acquire64_result)
+#undef LOAD_ACQUIRE_ARENA
+
+#endif
+	return 0;
+}
+
+#if __clang_major__ >= 18
+__u8 __arena_global store_release8_result = 0;
+__u16 __arena_global store_release16_result = 0;
+__u32 __arena_global store_release32_result = 0;
+__u64 __arena_global store_release64_result = 0;
+#else
+/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
+ * this issue by defining the below variables as 64-bit.
+ */
+__u64 __arena_global store_release8_result;
+__u64 __arena_global store_release16_result;
+__u64 __arena_global store_release32_result;
+__u64 __arena_global store_release64_result;
+#endif
+
+SEC("raw_tp/sys_enter")
+int store_release(const void *ctx)
+{
+#if defined(ENABLE_ATOMICS_TESTS) && \
+	defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
+	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+
+#define STORE_RELEASE_ARENA(SIZEOP, DST, VAL)			\
+	{ asm volatile (					\
+	"r1 = " VAL ";"						\
+	"r2 = %[" #DST "] ll;"					\
+	"r2 = addr_space_cast(r2, 0x0, 0x1);"			\
+	".8byte %[store_release_insn];"				\
+	:							\
+	: __imm_addr(DST),					\
+	  __imm_insn(store_release_insn,			\
+		     BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_STORE_REL,	\
+				   BPF_REG_2, BPF_REG_1, 0))	\
+	: __clobber_all); }					\
+
+	STORE_RELEASE_ARENA(B, store_release8_result, "0x12")
+	STORE_RELEASE_ARENA(H, store_release16_result, "0x1234")
+	STORE_RELEASE_ARENA(W, store_release32_result, "0x12345678")
+	STORE_RELEASE_ARENA(DW, store_release64_result,
+			    "0x1234567890abcdef ll")
+#undef STORE_RELEASE_ARENA
+
+#endif
 	return 0;
 }
 
tools/testing/selftests/bpf/progs/verifier_load_acquire.c (+197, new file)
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \
+	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+
+SEC("socket")
+__description("load-acquire, 8-bit")
+__success __success_unpriv __retval(0x12)
+__naked void load_acquire_8(void)
+{
+	asm volatile (
+	"w1 = 0x12;"
+	"*(u8 *)(r10 - 1) = w1;"
+	".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r10 - 1));
+	"exit;"
+	:
+	: __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -1))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire, 16-bit")
+__success __success_unpriv __retval(0x1234)
+__naked void load_acquire_16(void)
+{
+	asm volatile (
+	"w1 = 0x1234;"
+	"*(u16 *)(r10 - 2) = w1;"
+	".8byte %[load_acquire_insn];" // w0 = load_acquire((u16 *)(r10 - 2));
+	"exit;"
+	:
+	: __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_H, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -2))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire, 32-bit")
+__success __success_unpriv __retval(0x12345678)
+__naked void load_acquire_32(void)
+{
+	asm volatile (
+	"w1 = 0x12345678;"
+	"*(u32 *)(r10 - 4) = w1;"
+	".8byte %[load_acquire_insn];" // w0 = load_acquire((u32 *)(r10 - 4));
+	"exit;"
+	:
+	: __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -4))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire, 64-bit")
+__success __success_unpriv __retval(0x1234567890abcdef)
+__naked void load_acquire_64(void)
+{
+	asm volatile (
+	"r1 = 0x1234567890abcdef ll;"
+	"*(u64 *)(r10 - 8) = r1;"
+	".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r10 - 8));
+	"exit;"
+	:
+	: __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -8))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire with uninitialized src_reg")
+__failure __failure_unpriv __msg("R2 !read_ok")
+__naked void load_acquire_with_uninitialized_src_reg(void)
+{
+	asm volatile (
+	".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r2 + 0));
+	"exit;"
+	:
+	: __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire with non-pointer src_reg")
+__failure __failure_unpriv __msg("R1 invalid mem access 'scalar'")
+__naked void load_acquire_with_non_pointer_src_reg(void)
+{
+	asm volatile (
+	"r1 = 0;"
+	".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r1 + 0));
+	"exit;"
+	:
+	: __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("misaligned load-acquire")
+__failure __failure_unpriv __msg("misaligned stack access off")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void load_acquire_misaligned(void)
+{
+	asm volatile (
+	"r1 = 0;"
+	"*(u64 *)(r10 - 8) = r1;"
+	".8byte %[load_acquire_insn];" // w0 = load_acquire((u32 *)(r10 - 5));
+	"exit;"
+	:
+	: __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -5))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire from ctx pointer")
+__failure __failure_unpriv __msg("BPF_ATOMIC loads from R1 ctx is not allowed")
+__naked void load_acquire_from_ctx_pointer(void)
+{
+	asm volatile (
+	".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r1 + 0));
+	"exit;"
+	:
+	: __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("load-acquire from pkt pointer")
+__failure __msg("BPF_ATOMIC loads from R2 pkt is not allowed")
+__naked void load_acquire_from_pkt_pointer(void)
+{
+	asm volatile (
+	"r2 = *(u32 *)(r1 + %[xdp_md_data]);"
+	".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0));
+	"exit;"
+	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
+	: __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("load-acquire from flow_keys pointer")
+__failure __msg("BPF_ATOMIC loads from R2 flow_keys is not allowed")
+__naked void load_acquire_from_flow_keys_pointer(void)
+{
+	asm volatile (
+	"r2 = *(u64 *)(r1 + %[__sk_buff_flow_keys]);"
+	".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0));
+	"exit;"
+	:
+	: __imm_const(__sk_buff_flow_keys,
+		      offsetof(struct __sk_buff, flow_keys)),
+	  __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
+	: __clobber_all);
+}
+
+SEC("sk_reuseport")
+__description("load-acquire from sock pointer")
+__failure __msg("BPF_ATOMIC loads from R2 sock is not allowed")
+__naked void load_acquire_from_sock_pointer(void)
+{
+	asm volatile (
+	"r2 = *(u64 *)(r1 + %[sk_reuseport_md_sk]);"
+	".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0));
+	"exit;"
+	:
+	: __imm_const(sk_reuseport_md_sk, offsetof(struct sk_reuseport_md, sk)),
+	  __imm_insn(load_acquire_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
+	: __clobber_all);
+}
+
+#else
+
+SEC("socket")
+__description("Clang version < 18, ENABLE_ATOMICS_TESTS not defined, and/or JIT doesn't support load-acquire, use a dummy test")
+__success
+int dummy_test(void)
+{
+	return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";
tools/testing/selftests/bpf/progs/verifier_precision.c (+49)
···
 /* Copyright (C) 2023 SUSE LLC */
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
 #include "bpf_misc.h"
 
 SEC("?raw_tp")
···
 	::: __clobber_all);
 }
 
+#if defined(ENABLE_ATOMICS_TESTS) && \
+	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (db) r2 = load_acquire((u64 *)(r10 -8))")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_load_acquire(void)
+{
+	asm volatile (
+		"r1 = 8;"
+		"*(u64 *)(r10 - 8) = r1;"
+		".8byte %[load_acquire_insn];" /* r2 = load_acquire((u64 *)(r10 - 8)); */
+		"r3 = r10;"
+		"r3 += r2;" /* mark_precise */
+		"r0 = 0;"
+		"exit;"
+		:
+		: __imm_insn(load_acquire_insn,
+			     BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8))
+		: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10")
+__msg("mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_store_release(void)
+{
+	asm volatile (
+		"r1 = 8;"
+		".8byte %[store_release_insn];" /* store_release((u64 *)(r10 - 8), r1); */
+		"r1 = *(u64 *)(r10 - 8);"
+		"r2 = r10;"
+		"r2 += r1;" /* mark_precise */
+		"r0 = 0;"
+		"exit;"
+		:
+		: __imm_insn(store_release_insn,
+			     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
+		: __clobber_all);
+}
+
+#endif /* load-acquire, store-release */
 #endif /* v4 instruction */
 
 SEC("?raw_tp")
tools/testing/selftests/bpf/progs/verifier_store_release.c (+264, new file)
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \
+	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+
+SEC("socket")
+__description("store-release, 8-bit")
+__success __success_unpriv __retval(0x12)
+__naked void store_release_8(void)
+{
+	asm volatile (
+	"w1 = 0x12;"
+	".8byte %[store_release_insn];" // store_release((u8 *)(r10 - 1), w1);
+	"w0 = *(u8 *)(r10 - 1);"
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -1))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("store-release, 16-bit")
+__success __success_unpriv __retval(0x1234)
+__naked void store_release_16(void)
+{
+	asm volatile (
+	"w1 = 0x1234;"
+	".8byte %[store_release_insn];" // store_release((u16 *)(r10 - 2), w1);
+	"w0 = *(u16 *)(r10 - 2);"
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_H, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -2))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("store-release, 32-bit")
+__success __success_unpriv __retval(0x12345678)
+__naked void store_release_32(void)
+{
+	asm volatile (
+	"w1 = 0x12345678;"
+	".8byte %[store_release_insn];" // store_release((u32 *)(r10 - 4), w1);
+	"w0 = *(u32 *)(r10 - 4);"
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_W, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -4))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("store-release, 64-bit")
+__success __success_unpriv __retval(0x1234567890abcdef)
+__naked void store_release_64(void)
+{
+	asm volatile (
+	"r1 = 0x1234567890abcdef ll;"
+	".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r1);
+	"r0 = *(u64 *)(r10 - 8);"
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("store-release with uninitialized src_reg")
+__failure __failure_unpriv __msg("R2 !read_ok")
+__naked void store_release_with_uninitialized_src_reg(void)
+{
+	asm volatile (
+	".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r2);
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_2, -8))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("store-release with uninitialized dst_reg")
+__failure __failure_unpriv __msg("R2 !read_ok")
+__naked void store_release_with_uninitialized_dst_reg(void)
+{
+	asm volatile (
+	"r1 = 0;"
+	".8byte %[store_release_insn];" // store_release((u64 *)(r2 - 8), r1);
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_2, BPF_REG_1, -8))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("store-release with non-pointer dst_reg")
+__failure __failure_unpriv __msg("R1 invalid mem access 'scalar'")
+__naked void store_release_with_non_pointer_dst_reg(void)
+{
+	asm volatile (
+	"r1 = 0;"
+	".8byte %[store_release_insn];" // store_release((u64 *)(r1 + 0), r1);
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_1, BPF_REG_1, 0))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("misaligned store-release")
+__failure __failure_unpriv __msg("misaligned stack access off")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void store_release_misaligned(void)
+{
+	asm volatile (
+	"w0 = 0;"
+	".8byte %[store_release_insn];" // store_release((u32 *)(r10 - 5), w0);
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_W, BPF_STORE_REL, BPF_REG_10, BPF_REG_0, -5))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("store-release to ctx pointer")
+__failure __failure_unpriv __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
+__naked void store_release_to_ctx_pointer(void)
+{
+	asm volatile (
+	"w0 = 0;"
+	".8byte %[store_release_insn];" // store_release((u8 *)(r1 + 0), w0);
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_1, BPF_REG_0, 0))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("store-release to pkt pointer")
+__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
+__naked void store_release_to_pkt_pointer(void)
+{
+	asm volatile (
+	"w0 = 0;"
+	"r2 = *(u32 *)(r1 + %[xdp_md_data]);"
+	".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0);
+	"exit;"
+	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0))
+	: __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("store-release to flow_keys pointer")
+__failure __msg("BPF_ATOMIC stores into R2 flow_keys is not allowed")
+__naked void store_release_to_flow_keys_pointer(void)
+{
+	asm volatile (
+	"w0 = 0;"
+	"r2 = *(u64 *)(r1 + %[__sk_buff_flow_keys]);"
+	".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0);
+	"exit;"
+	:
+	: __imm_const(__sk_buff_flow_keys,
+		      offsetof(struct __sk_buff, flow_keys)),
+	  __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0))
+	: __clobber_all);
+}
+
+SEC("sk_reuseport")
+__description("store-release to sock pointer")
+__failure __msg("BPF_ATOMIC stores into R2 sock is not allowed")
+__naked void store_release_to_sock_pointer(void)
+{
+	asm volatile (
+	"w0 = 0;"
+	"r2 = *(u64 *)(r1 + %[sk_reuseport_md_sk]);"
+	".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0);
+	"exit;"
+	:
+	: __imm_const(sk_reuseport_md_sk, offsetof(struct sk_reuseport_md, sk)),
+	  __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0))
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("store-release, leak pointer to stack")
+__success __success_unpriv __retval(0)
+__naked void store_release_leak_pointer_to_stack(void)
+{
+	asm volatile (
+	".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r1);
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
+	: __clobber_all);
+}
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1);
+	__type(key, long long);
+	__type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("socket")
+__description("store-release, leak pointer to map")
+__success __retval(0)
+__failure_unpriv __msg_unpriv("R6 leaks addr into map")
+__naked void store_release_leak_pointer_to_map(void)
+{
+	asm volatile (
+	"r6 = r1;"
+	"r1 = %[map_hash_8b] ll;"
+	"r2 = 0;"
+	"*(u64 *)(r10 - 8) = r2;"
+	"r2 = r10;"
+	"r2 += -8;"
+	"call %[bpf_map_lookup_elem];"
+	"if r0 == 0 goto l0_%=;"
+	".8byte %[store_release_insn];" // store_release((u64 *)(r0 + 0), r6);
+	"l0_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_addr(map_hash_8b),
+	  __imm(bpf_map_lookup_elem),
+	  __imm_insn(store_release_insn,
+		     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_0, BPF_REG_6, 0))
+	: __clobber_all);
+}
+
+#else
+
+SEC("socket")
+__description("Clang version < 18, ENABLE_ATOMICS_TESTS not defined, and/or JIT doesn't support store-release, use a dummy test")
+__success
+int dummy_test(void)
+{
+	return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";