Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: lto: Strengthen READ_ONCE() to acquire when CONFIG_LTO=y

When building with LTO, there is an increased risk of the compiler
converting an address dependency headed by a READ_ONCE() invocation
into a control dependency and consequently allowing for harmful
reordering by the CPU.

Ensure that such transformations are harmless by overriding the generic
READ_ONCE() definition with one that provides acquire semantics when
building with LTO.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>

+76 -3
+73
arch/arm64/include/asm/rwonce.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Google LLC.
 */
#ifndef __ASM_RWONCE_H
#define __ASM_RWONCE_H

#ifdef CONFIG_LTO

#include <linux/compiler_types.h>
#include <asm/alternative-macros.h>

/*
 * The vDSO is built and linked separately from the kernel proper and cannot
 * use the boot-time alternatives patching machinery, so it is excluded here
 * and picks up the generic __READ_ONCE() from <asm-generic/rwonce.h> below.
 */
#ifndef BUILD_VDSO

#ifdef CONFIG_AS_HAS_LDAPR
/*
 * Emit an acquire load of the given width: LDAR by default, patched at boot
 * to the weaker (RCpc, and typically cheaper) LDAPR on CPUs that advertise
 * the RCpc extension. The .arch_extension directive is needed so the
 * assembler accepts LDAPR regardless of the baseline -march.
 */
#define __LOAD_RCPC(sfx, regs...)					\
	ALTERNATIVE(							\
		"ldar" #sfx "\t" #regs,					\
		".arch_extension rcpc\n"				\
		"ldapr" #sfx "\t" #regs,				\
	ARM64_HAS_LDAPR)
#else
/* Assembler has no LDAPR support: unconditionally use the stronger LDAR. */
#define __LOAD_RCPC(sfx, regs...)	"ldar" #sfx "\t" #regs
#endif /* CONFIG_AS_HAS_LDAPR */

/*
 * When building with LTO, there is an increased risk of the compiler
 * converting an address dependency headed by a READ_ONCE() invocation
 * into a control dependency and consequently allowing for harmful
 * reordering by the CPU.
 *
 * Ensure that such transformations are harmless by overriding the generic
 * READ_ONCE() definition with one that provides RCpc acquire semantics
 * when building with LTO.
 */
#define __READ_ONCE(x)							\
({									\
	typeof(&(x)) __x = &(x);					\
	int atomic = 1;							\
	/* Union strips volatile/const so __val is assignable below. */	\
	union { __unqual_scalar_typeof(*__x) __val; char __c[1]; } __u;	\
	/* One acquire-load asm per naturally-atomic access size. */	\
	switch (sizeof(x)) {						\
	case 1:								\
		asm volatile(__LOAD_RCPC(b, %w0, %1)			\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*__x) : "memory");			\
		break;							\
	case 2:								\
		asm volatile(__LOAD_RCPC(h, %w0, %1)			\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*__x) : "memory");			\
		break;							\
	case 4:								\
		asm volatile(__LOAD_RCPC(, %w0, %1)			\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*__x) : "memory");			\
		break;							\
	case 8:								\
		asm volatile(__LOAD_RCPC(, %0, %1)			\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*__x) : "memory");			\
		break;							\
	default:							\
		/* Odd sizes can't be loaded atomically anyway. */	\
		atomic = 0;						\
	}								\
	/* Non-atomic sizes fall back to a plain volatile load. */	\
	atomic ? (typeof(*__x))__u.__val : (*(volatile typeof(__x))__x);\
})

#endif	/* !BUILD_VDSO */
#endif	/* CONFIG_LTO */

#include <asm-generic/rwonce.h>

#endif	/* __ASM_RWONCE_H */
+1 -1
arch/arm64/kernel/vdso/Makefile
··· 28 28 $(btildflags-y) -T 29 29 30 30 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18 31 - ccflags-y += -DDISABLE_BRANCH_PROFILING 31 + ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO 32 32 33 33 CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) 34 34 KASAN_SANITIZE := n
+1 -1
arch/arm64/kernel/vdso32/Makefile
··· 48 48 # As a result we set our own flags here. 49 49 50 50 # KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile 51 - VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include) 51 + VDSO_CPPFLAGS := -DBUILD_VDSO -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include) 52 52 VDSO_CPPFLAGS += $(LINUXINCLUDE) 53 53 54 54 # Common C and assembly flags
+1 -1
arch/arm64/kernel/vmlinux.lds.S
··· 201 201 INIT_CALLS 202 202 CON_INITCALL 203 203 INIT_RAM_FS 204 - *(.init.rodata.* .init.bss) /* from the EFI stub */ 204 + *(.init.altinstructions .init.rodata.* .init.bss) /* from the EFI stub */ 205 205 } 206 206 .exit.data : { 207 207 EXIT_DATA