Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/cfi,bpf: Fix bpf_callback_t CFI

Where the main BPF program is expected to match bpf_func_t,
sub-programs are expected to match bpf_callback_t.

This fixes things like:

tools/testing/selftests/bpf/progs/bloom_filter_bench.c:

bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0);

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20231215092707.451956710@infradead.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Peter Zijlstra; committed by Alexei Starovoitov.
e72d88d1 4f9087f1

+30 -8
+2
arch/x86/include/asm/cfi.h
···
 enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
 #define __bpfcall
 extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;

 static inline int cfi_get_offset(void)
 {
···
 	return BUG_TRAP_TYPE_NONE;
 }
 #define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
 #endif /* CONFIG_CFI_CLANG */

 #endif /* _ASM_X86_CFI_H */
+18
arch/x86/kernel/alternative.c
···
 	"	.size	cfi_bpf_hash, 4				\n"
 	"	.popsection					\n"
 );
+
+/* Must match bpf_callback_t */
+extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
+
+__ADDRESSABLE(__bpf_callback_fn);
+
+/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
+asm (
+"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
+"	.type	cfi_bpf_subprog_hash,@object			\n"
+"	.globl	cfi_bpf_subprog_hash				\n"
+"	.p2align	2, 0x0					\n"
+"cfi_bpf_subprog_hash:						\n"
+"	.long	__kcfi_typeid___bpf_callback_fn		\n"
+"	.size	cfi_bpf_subprog_hash, 4			\n"
+"	.popsection						\n"
+);
 #endif

 #ifdef CONFIG_FINEIBT
···
 	if (builtin) {
 		cfi_seed = get_random_u32();
 		cfi_bpf_hash = cfi_rehash(cfi_bpf_hash);
+		cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
 	}

 	ret = cfi_rand_preamble(start_cfi, end_cfi);
+10 -8
arch/x86/net/bpf_jit_comp.c
···
  * in arch/x86/kernel/alternative.c
  */

-static void emit_fineibt(u8 **pprog)
+static void emit_fineibt(u8 **pprog, bool is_subprog)
 {
+	u32 hash = is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash;
 	u8 *prog = *pprog;

 	EMIT_ENDBR();
-	EMIT3_off32(0x41, 0x81, 0xea, cfi_bpf_hash);	/* subl $hash, %r10d	*/
+	EMIT3_off32(0x41, 0x81, 0xea, hash);		/* subl $hash, %r10d	*/
 	EMIT2(0x74, 0x07);				/* jz.d8 +7		*/
 	EMIT2(0x0f, 0x0b);				/* ud2			*/
 	EMIT1(0x90);					/* nop			*/
···
 	*pprog = prog;
 }

-static void emit_kcfi(u8 **pprog)
+static void emit_kcfi(u8 **pprog, bool is_subprog)
 {
+	u32 hash = is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash;
 	u8 *prog = *pprog;

-	EMIT1_off32(0xb8, cfi_bpf_hash);		/* movl $hash, %eax	*/
+	EMIT1_off32(0xb8, hash);			/* movl $hash, %eax	*/
 #ifdef CONFIG_CALL_PADDING
 	EMIT1(0x90);
 	EMIT1(0x90);
···
 	*pprog = prog;
 }

-static void emit_cfi(u8 **pprog)
+static void emit_cfi(u8 **pprog, bool is_subprog)
 {
 	u8 *prog = *pprog;

 	switch (cfi_mode) {
 	case CFI_FINEIBT:
-		emit_fineibt(&prog);
+		emit_fineibt(&prog, is_subprog);
 		break;

 	case CFI_KCFI:
-		emit_kcfi(&prog);
+		emit_kcfi(&prog, is_subprog);
 		break;

 	default:
···
 {
 	u8 *prog = *pprog;

-	emit_cfi(&prog);
+	emit_cfi(&prog, is_subprog);
 	/* BPF trampoline can be made to work without these nops,
 	 * but let's waste 5 bytes for now and optimize later
 	 */