Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Enable private stack tests for arm64

As arm64 JIT now supports private stack, make sure all relevant tests
run on arm64 architecture.

Relevant tests:

#415/1 struct_ops_private_stack/private_stack:OK
#415/2 struct_ops_private_stack/private_stack_fail:OK
#415/3 struct_ops_private_stack/private_stack_recur:OK
#415 struct_ops_private_stack:OK
#549/1 verifier_private_stack/Private stack, single prog:OK
#549/2 verifier_private_stack/Private stack, subtree > MAX_BPF_STACK:OK
#549/3 verifier_private_stack/No private stack:OK
#549/4 verifier_private_stack/Private stack, callback:OK
#549/5 verifier_private_stack/Private stack, exception in mainprog:OK
#549/6 verifier_private_stack/Private stack, exception in subprog:OK
#549/7 verifier_private_stack/Private stack, async callback, not nested:OK
#549/8 verifier_private_stack/Private stack, async callback, potential nesting:OK
#549 verifier_private_stack:OK
Summary: 2/11 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/bpf/20250724120257.7299-4-puranjay@kernel.org

Authored by Puranjay Mohan and committed by Daniel Borkmann
e9f545d0 6c17a882

+91 -4
+1 -1
tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
··· 7 7 8 8 char _license[] SEC("license") = "GPL"; 9 9 10 - #if defined(__TARGET_ARCH_x86) 10 + #if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) 11 11 bool skip __attribute((__section__(".data"))) = false; 12 12 #else 13 13 bool skip = true;
+1 -1
tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
··· 7 7 8 8 char _license[] SEC("license") = "GPL"; 9 9 10 - #if defined(__TARGET_ARCH_x86) 10 + #if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) 11 11 bool skip __attribute((__section__(".data"))) = false; 12 12 #else 13 13 bool skip = true;
+1 -1
tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
··· 7 7 8 8 char _license[] SEC("license") = "GPL"; 9 9 10 - #if defined(__TARGET_ARCH_x86) 10 + #if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) 11 11 bool skip __attribute((__section__(".data"))) = false; 12 12 #else 13 13 bool skip = true;
+88 -1
tools/testing/selftests/bpf/progs/verifier_private_stack.c
··· 8 8 /* From include/linux/filter.h */ 9 9 #define MAX_BPF_STACK 512 11 - #if defined(__TARGET_ARCH_x86) 11 + #if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) 12 12 13 13 struct elem { 14 14 struct bpf_timer t;
··· 30 30 __jited(" addq %gs:{{.*}}, %r9") 31 31 __jited(" movl $0x2a, %edi") 32 32 __jited(" movq %rdi, -0x100(%r9)") 33 + __arch_arm64 34 + __jited(" stp x25, x27, [sp, {{.*}}]!") 35 + __jited(" mov x27, {{.*}}") 36 + __jited(" movk x27, {{.*}}, lsl #16") 37 + __jited(" movk x27, {{.*}}") 38 + __jited(" mrs x10, TPIDR_EL{{[0-1]}}") 39 + __jited(" add x27, x27, x10") 40 + __jited(" add x25, x27, {{.*}}") 41 + __jited(" mov x0, #0x2a") 42 + __jited(" str x0, [x27]") 43 + __jited("...") 44 + __jited(" ldp x25, x27, [sp], {{.*}}") 33 45 __naked void private_stack_single_prog(void) 34 46 { 35 47 asm volatile (" \
··· 57 45 __success 58 46 __arch_x86_64 59 47 __jited(" subq $0x8, %rsp") 48 + __arch_arm64 49 + __jited(" mov x25, sp") 50 + __jited(" sub sp, sp, #0x10") 60 51 __naked void no_private_stack_nested(void) 61 52 { 62 53 asm volatile (" \
··· 96 81 __jited(" callq 0x{{.*}}") 97 82 __jited(" popq %r9") 98 83 __jited(" xorl %eax, %eax") 84 + __arch_arm64 85 + __jited(" stp x25, x27, [sp, {{.*}}]!") 86 + __jited(" mov x27, {{.*}}") 87 + __jited(" movk x27, {{.*}}, lsl #16") 88 + __jited(" movk x27, {{.*}}") 89 + __jited(" mrs x10, TPIDR_EL{{[0-1]}}") 90 + __jited(" add x27, x27, x10") 91 + __jited(" add x25, x27, {{.*}}") 92 + __jited(" mov x0, #0x2a") 93 + __jited(" str x0, [x27]") 94 + __jited(" bl {{.*}}") 95 + __jited("...") 96 + __jited(" ldp x25, x27, [sp], {{.*}}") 99 97 __naked void private_stack_nested_1(void) 100 98 { 101 99 asm volatile (" \
··· 159 131 __jited(" pushq %r9") 160 132 __jited(" callq") 161 133 __jited(" popq %r9") 134 + __arch_arm64 135 + __jited("func #1") 136 + __jited("...") 137 + __jited(" stp x25, x27, [sp, {{.*}}]!") 138 + __jited(" mov x27, {{.*}}") 139 + __jited(" movk x27, {{.*}}, lsl #16") 140 + __jited(" movk x27, {{.*}}") 141 + __jited(" mrs x10, TPIDR_EL{{[0-1]}}") 142 + __jited(" add x27, x27, x10") 143 + __jited(" add x25, x27, {{.*}}") 144 + __jited(" bl 0x{{.*}}") 145 + __jited(" add x7, x0, #0x0") 146 + __jited(" mov x0, #0x2a") 147 + __jited(" str x0, [x27]") 148 + __jited(" bl 0x{{.*}}") 149 + __jited(" add x7, x0, #0x0") 150 + __jited(" mov x7, #0x0") 151 + __jited(" ldp x25, x27, [sp], {{.*}}") 162 152 __naked void private_stack_callback(void) 163 153 { 164 154 asm volatile (" \
··· 200 154 __jited(" pushq %r9") 201 155 __jited(" callq") 202 156 __jited(" popq %r9") 157 + __arch_arm64 158 + __jited(" stp x29, x30, [sp, #-0x10]!") 159 + __jited(" mov x29, sp") 160 + __jited(" stp xzr, x26, [sp, #-0x10]!") 161 + __jited(" mov x26, sp") 162 + __jited(" stp x19, x20, [sp, #-0x10]!") 163 + __jited(" stp x21, x22, [sp, #-0x10]!") 164 + __jited(" stp x23, x24, [sp, #-0x10]!") 165 + __jited(" stp x25, x26, [sp, #-0x10]!") 166 + __jited(" stp x27, x28, [sp, #-0x10]!") 167 + __jited(" mov x27, {{.*}}") 168 + __jited(" movk x27, {{.*}}, lsl #16") 169 + __jited(" movk x27, {{.*}}") 170 + __jited(" mrs x10, TPIDR_EL{{[0-1]}}") 171 + __jited(" add x27, x27, x10") 172 + __jited(" add x25, x27, {{.*}}") 173 + __jited(" mov x0, #0x2a") 174 + __jited(" str x0, [x27]") 175 + __jited(" mov x0, #0x0") 176 + __jited(" bl 0x{{.*}}") 177 + __jited(" add x7, x0, #0x0") 178 + __jited(" ldp x27, x28, [sp], #0x10") 203 179 int private_stack_exception_main_prog(void) 204 180 { 205 181 asm volatile (" \
··· 247 179 __jited(" pushq %r9") 248 180 __jited(" callq") 249 181 __jited(" popq %r9") 182 + __arch_arm64 183 + __jited(" stp x27, x28, [sp, #-0x10]!") 184 + __jited(" mov x27, {{.*}}") 185 + __jited(" movk x27, {{.*}}, lsl #16") 186 + __jited(" movk x27, {{.*}}") 187 + __jited(" mrs x10, TPIDR_EL{{[0-1]}}") 188 + __jited(" add x27, x27, x10") 189 + __jited(" add x25, x27, {{.*}}") 190 + __jited(" mov x0, #0x2a") 191 + __jited(" str x0, [x27]") 192 + __jited(" bl 0x{{.*}}") 193 + __jited(" add x7, x0, #0x0") 194 + __jited(" ldp x27, x28, [sp], #0x10") 250 195 int private_stack_exception_sub_prog(void) 251 196 { 252 197 asm volatile (" \
··· 301 220 __success __retval(0) 302 221 __arch_x86_64 303 222 __jited(" movabsq $0x{{.*}}, %r9") 223 + __arch_arm64 224 + __jited(" mrs x10, TPIDR_EL{{[0-1]}}") 225 + __jited(" add x27, x27, x10") 226 + __jited(" add x25, x27, {{.*}}") 304 227 int private_stack_async_callback_1(void) 305 228 { 306 229 struct bpf_timer *arr_timer;
··· 326 241 __success __retval(0) 327 242 __arch_x86_64 328 243 __jited(" subq $0x100, %rsp") 244 + __arch_arm64 245 + __jited(" sub sp, sp, #0x100") 329 246 int private_stack_async_callback_2(void) 330 247 { 331 248 struct bpf_timer *arr_timer;