Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf, arm64: Add JIT support for timed may_goto

When verifier sees a timed may_goto instruction, it emits a call to
arch_bpf_timed_may_goto() with a stack offset in BPF_REG_AX (arm64 r9)
and expects a count value to be returned in the same register. The
verifier doesn't save or restore any registers before emitting this
call.

arch_bpf_timed_may_goto() should act as a trampoline to call
bpf_check_timed_may_goto() with AAPCS64 calling convention.

To support this custom calling convention, implement
arch_bpf_timed_may_goto() in assembly and make sure BPF caller saved
registers are saved and restored, call bpf_check_timed_may_goto with
arm64 calling convention where first argument and return value both are
in x0, then put the result back into BPF_REG_AX before returning.

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Xu Kuohai <xukuohai@huawei.com>
Link: https://lore.kernel.org/r/20250827113245.52629-2-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Puranjay Mohan; committed by Alexei Starovoitov.
16175375 4c229f33

+53 -2
+1 -1
arch/arm64/net/Makefile
··· 2 2 # 3 3 # ARM64 networking code 4 4 # 5 - obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o 5 + obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o
+12 -1
arch/arm64/net/bpf_jit_comp.c
··· 1558 1558 if (ret < 0) 1559 1559 return ret; 1560 1560 emit_call(func_addr, ctx); 1561 - emit(A64_MOV(1, r0, A64_R(0)), ctx); 1561 + /* 1562 + * Call to arch_bpf_timed_may_goto() is emitted by the 1563 + * verifier and called with custom calling convention with 1564 + * first argument and return value in BPF_REG_AX (x9). 1565 + */ 1566 + if (func_addr != (u64)arch_bpf_timed_may_goto) 1567 + emit(A64_MOV(1, r0, A64_R(0)), ctx); 1562 1568 break; 1563 1569 } 1564 1570 /* tail call */ ··· 3041 3035 * no need to provide any additional instructions. Therefore, skip 3042 3036 * inserting nospec insns against Spectre v4. 3043 3037 */ 3038 + return true; 3039 + } 3040 + 3041 + bool bpf_jit_supports_timed_may_goto(void) 3042 + { 3044 3043 return true; 3045 3044 } 3046 3045
+40
arch/arm64/net/bpf_timed_may_goto.S
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2025 Puranjay Mohan <puranjay@kernel.org> */

#include <linux/linkage.h>

/*
 * arch_bpf_timed_may_goto() - trampoline between the BPF JIT's custom
 * calling convention and AAPCS64 for timed may_goto.
 *
 * The verifier emits a call to this function with the stack offset of
 * the count/timestamp slot in BPF_REG_AX (x9) and expects the updated
 * count back in the same register. The verifier saves no registers
 * around the call, so the BPF caller-saved registers R0-R5 must be
 * preserved here before calling bpf_check_timed_may_goto() with the
 * standard arm64 convention (argument and return value in x0).
 */
SYM_FUNC_START(arch_bpf_timed_may_goto)
	/* Allocate stack space and emit frame record */
	stp	x29, x30, [sp, #-64]!
	mov	x29, sp

	/* Save BPF registers R0 - R5 (x7, x0-x4) */
	stp	x7, x0, [sp, #16]
	stp	x1, x2, [sp, #32]
	stp	x3, x4, [sp, #48]

	/*
	 * Stack depth was passed in BPF_REG_AX (x9), add it to the BPF_FP
	 * (x25) to get the pointer to count and timestamp and pass it as the
	 * first argument in x0.
	 *
	 * Before generating the call to arch_bpf_timed_may_goto, the verifier
	 * generates a load instruction using FP, i.e. REG_AX = *(u64 *)(FP -
	 * stack_off_cnt), so BPF_REG_FP (x25) is always set up by the arm64
	 * jit in this case.
	 */
	add	x0, x9, x25
	bl	bpf_check_timed_may_goto
	/* BPF_REG_AX (x9) will be stored into count, so move return value to it. */
	mov	x9, x0

	/* Restore BPF registers R0 - R5 (x7, x0-x4) */
	ldp	x7, x0, [sp, #16]
	ldp	x1, x2, [sp, #32]
	ldp	x3, x4, [sp, #48]

	/* Restore FP and LR */
	ldp	x29, x30, [sp], #64

	ret
SYM_FUNC_END(arch_bpf_timed_may_goto)