Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

- In copy_highpage(), only reset the tag of the destination pointer if
KASAN_HW_TAGS is enabled so that user-space MTE does not interfere
with KASAN_SW_TAGS (which relies on top-byte-ignore).

- Remove warning if SME is detected without SVE, as the kernel can cope
  with such a configuration (though none exist in the field currently).

- In cfi_handler(), pass the ESR_EL1 value to die() for consistency
with other die() callers.

- Disable HUGETLB_PAGE_OPTIMIZE_VMEMMAP on arm64 since the pte
manipulation from the generic vmemmap_remap_pte() does not follow the
required ARM break-before-make sequence (clear the pte, flush the
TLBs, set the new pte). It may be re-enabled once this sequence is
sorted.

- Fix possible memory leak in the arm64 ACPI code if the SMCCC version
and conduit checks fail.

- Forbid CALL_OPS with CC_OPTIMIZE_FOR_SIZE since gcc ignores
-falign-functions=N with -Os.

- Don't pretend KASLR is enabled if offset < MIN_KIMG_ALIGN as no
randomisation would actually take place.

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: kaslr: don't pretend KASLR is enabled if offset < MIN_KIMG_ALIGN
arm64: ftrace: forbid CALL_OPS with CC_OPTIMIZE_FOR_SIZE
arm64: acpi: Fix possible memory leak of ffh_ctxt
arm64: mm: hugetlb: Disable HUGETLB_PAGE_OPTIMIZE_VMEMMAP
arm64: pass ESR_ELx to die() of cfi_handler
arm64/fpsimd: Remove warning for SME without SVE
arm64: Reset KASAN tag in copy_highpage with HW tags only

+22 -13
+2 -2
arch/arm64/Kconfig
··· 100 100 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT 101 101 select ARCH_WANT_FRAME_POINTERS 102 102 select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36) 103 - select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP 104 103 select ARCH_WANT_LD_ORPHAN_WARN 105 104 select ARCH_WANTS_NO_INSTR 106 105 select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES ··· 186 187 select HAVE_DMA_CONTIGUOUS 187 188 select HAVE_DYNAMIC_FTRACE 188 189 select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \ 189 - if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG) 190 + if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG && \ 191 + !CC_OPTIMIZE_FOR_SIZE) 190 192 select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \ 191 193 if DYNAMIC_FTRACE_WITH_ARGS 192 194 select HAVE_EFFICIENT_UNALIGNED_ACCESS
+11
arch/arm64/include/asm/memory.h
··· 180 180 #include <linux/compiler.h> 181 181 #include <linux/mmdebug.h> 182 182 #include <linux/types.h> 183 + #include <asm/boot.h> 183 184 #include <asm/bug.h> 184 185 185 186 #if VA_BITS > 48 ··· 202 201 static inline unsigned long kaslr_offset(void) 203 202 { 204 203 return kimage_vaddr - KIMAGE_VADDR; 204 + } 205 + 206 + static inline bool kaslr_enabled(void) 207 + { 208 + /* 209 + * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical 210 + * placement of the image rather than from the seed, so a displacement 211 + * of less than MIN_KIMG_ALIGN means that no seed was provided. 212 + */ 213 + return kaslr_offset() >= MIN_KIMG_ALIGN; 205 214 } 206 215 207 216 /*
+4 -4
arch/arm64/kernel/acpi.c
··· 435 435 enum arm_smccc_conduit conduit; 436 436 struct acpi_ffh_data *ffh_ctxt; 437 437 438 - ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL); 439 - if (!ffh_ctxt) 440 - return -ENOMEM; 441 - 442 438 if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2) 443 439 return -EOPNOTSUPP; 444 440 ··· 443 447 pr_err("%s: invalid SMCCC conduit\n", __func__); 444 448 return -EOPNOTSUPP; 445 449 } 450 + 451 + ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL); 452 + if (!ffh_ctxt) 453 + return -ENOMEM; 446 454 447 455 if (conduit == SMCCC_CONDUIT_SMC) { 448 456 ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc;
+1 -1
arch/arm64/kernel/cpufeature.c
··· 1633 1633 return false; 1634 1634 } 1635 1635 1636 - return kaslr_offset() > 0; 1636 + return kaslr_enabled(); 1637 1637 } 1638 1638 1639 1639 static bool __meltdown_safe = true;
-3
arch/arm64/kernel/fpsimd.c
··· 2122 2122 pr_notice("Advanced SIMD is not implemented\n"); 2123 2123 2124 2124 2125 - if (cpu_have_named_feature(SME) && !cpu_have_named_feature(SVE)) 2126 - pr_notice("SME is implemented but not SVE\n"); 2127 - 2128 2125 sve_sysctl_init(); 2129 2126 sme_sysctl_init(); 2130 2127
+1 -1
arch/arm64/kernel/kaslr.c
··· 41 41 return 0; 42 42 } 43 43 44 - if (!kaslr_offset()) { 44 + if (!kaslr_enabled()) { 45 45 pr_warn("KASLR disabled due to lack of seed\n"); 46 46 return 0; 47 47 }
+1 -1
arch/arm64/kernel/traps.c
··· 997 997 998 998 switch (report_cfi_failure(regs, regs->pc, &target, type)) { 999 999 case BUG_TRAP_TYPE_BUG: 1000 - die("Oops - CFI", regs, 0); 1000 + die("Oops - CFI", regs, esr); 1001 1001 break; 1002 1002 1003 1003 case BUG_TRAP_TYPE_WARN:
+2 -1
arch/arm64/mm/copypage.c
··· 22 22 copy_page(kto, kfrom); 23 23 24 24 if (system_supports_mte() && page_mte_tagged(from)) { 25 - page_kasan_tag_reset(to); 25 + if (kasan_hw_tags_enabled()) 26 + page_kasan_tag_reset(to); 26 27 /* It's a new page, shouldn't have been tagged yet */ 27 28 WARN_ON_ONCE(!try_page_mte_tagging(to)); 28 29 mte_copy_page_tags(kto, kfrom);