Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc64/bpf: Fold bpf_jit_emit_func_call_hlp() into bpf_jit_emit_func_call_rel()

Commit 61688a82e047 ("powerpc/bpf: enable kfunc call") enhanced
bpf_jit_emit_func_call_hlp() to handle calls out to module region, where
bpf progs are generated. The only difference now between
bpf_jit_emit_func_call_hlp() and bpf_jit_emit_func_call_rel() is in
handling of the initial pass where target function address is not known.
Fold that logic into bpf_jit_emit_func_call_hlp() and rename it to
bpf_jit_emit_func_call_rel() to simplify bpf function call JIT code.

We don't actually need to load/restore TOC across a call out to a
different kernel helper or to a different bpf program since they all
work with the kernel TOC. We only need to do it if we have to call out
to a module function. So, guard TOC load/restore with appropriate
conditions.

Signed-off-by: Naveen N Rao <naveen@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/20241030070850.1361304-10-hbathini@linux.ibm.com

Authored by Naveen N Rao and committed by Michael Ellerman.
9670f6d2 ed614465

+17 -44
+17 -44
arch/powerpc/net/bpf_jit_comp64.c
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -202,14 +202,22 @@
 	EMIT(PPC_RAW_BLR());
 }
 
-static int
-bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
 {
 	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
 	long reladdr;
 
-	if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
-		return -EINVAL;
+	/* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */
+	if (!func) {
+		for (int i = 0; i < 5; i++)
+			EMIT(PPC_RAW_NOP());
+		/* elfv1 needs an additional instruction to load addr from descriptor */
+		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+			EMIT(PPC_RAW_NOP());
+		EMIT(PPC_RAW_MTCTR(_R12));
+		EMIT(PPC_RAW_BCTRL());
+		return 0;
+	}
 
 #ifdef CONFIG_PPC_KERNEL_PCREL
 	reladdr = func_addr - local_paca->kernelbase;
@@ -274,7 +266,8 @@
 		 * We can clobber r2 since we get called through a
 		 * function pointer (so caller will save/restore r2).
 		 */
-		EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
+		if (is_module_text_address(func_addr))
+			EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
 	} else {
 		PPC_LI64(_R12, func);
 		EMIT(PPC_RAW_MTCTR(_R12));
@@ -285,42 +276,10 @@
 		 * Load r2 with kernel TOC as kernel TOC is used if function address falls
 		 * within core kernel text.
 		 */
-		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
+		if (is_module_text_address(func_addr))
+			EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
 	}
 #endif
-
-	return 0;
-}
-
-int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
-{
-	unsigned int i, ctx_idx = ctx->idx;
-
-	if (WARN_ON_ONCE(func && is_module_text_address(func)))
-		return -EINVAL;
-
-	/* skip past descriptor if elf v1 */
-	func += FUNCTION_DESCR_SIZE;
-
-	/* Load function address into r12 */
-	PPC_LI64(_R12, func);
-
-	/* For bpf-to-bpf function calls, the callee's address is unknown
-	 * until the last extra pass. As seen above, we use PPC_LI64() to
-	 * load the callee's address, but this may optimize the number of
-	 * instructions required based on the nature of the address.
-	 *
-	 * Since we don't want the number of instructions emitted to increase,
-	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
-	 * we always have a five-instruction sequence, which is the maximum
-	 * that PPC_LI64() can emit.
-	 */
-	if (!image)
-		for (i = ctx->idx - ctx_idx; i < 5; i++)
-			EMIT(PPC_RAW_NOP());
-
-	EMIT(PPC_RAW_MTCTR(_R12));
-	EMIT(PPC_RAW_BCTRL());
 
 	return 0;
 }
@@ -1079,11 +1102,7 @@
 		if (ret < 0)
 			return ret;
 
-		if (func_addr_fixed)
-			ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
-		else
-			ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
-
+		ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
 		if (ret)
 			return ret;
 