Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:

- Fix bogus KASAN splat on EFI runtime stack

- Select JUMP_LABEL unconditionally to avoid boot failure with pKVM and
the legacy implementation of static keys

- Avoid touching GCS registers when 'arm64.nogcs' has been passed on
the command-line

- Move a 'cpumask_t' off the stack in smp_send_stop()

- Don't advertise SME-related hwcaps to userspace when ID_AA64PFR1_EL1
indicates that SME is not implemented

- Always check the VMA when handling an Overlay fault

- Avoid corrupting TCR2_EL1 during boot

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64/mm: Drop wrong writes into TCR2_EL1
arm64: poe: Handle spurious Overlay faults
arm64: Filter out SME hwcaps when FEAT_SME isn't implemented
arm64: move smp_send_stop() cpu mask off stack
arm64/gcs: Don't try to access GCS registers if arm64.nogcs is enabled
arm64: Unconditionally select CONFIG_JUMP_LABEL
arm64: efi: Fix KASAN false positive for EFI runtime stack

+76 -53
+1
arch/arm64/Kconfig
···
 	select HOTPLUG_SMT if HOTPLUG_CPU
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
+	select JUMP_LABEL
 	select KASAN_VMALLOC if KASAN
 	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
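
For context, a minimal sketch of the static-key API whose two implementations CONFIG_JUMP_LABEL selects between; the key name and the message are illustrative, not taken from the pKVM code:

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	/* Hypothetical key, for illustration only. */
	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	void my_hot_path(void)
	{
		/*
		 * With CONFIG_JUMP_LABEL=y this compiles down to a patchable
		 * NOP/branch. Without it, the "legacy" implementation falls
		 * back to reading the key's enabled count at runtime - the
		 * variant that broke booting with pKVM, hence the
		 * unconditional select above.
		 */
		if (static_branch_unlikely(&my_feature_key))
			pr_info("feature enabled\n");
	}

Selecting JUMP_LABEL unconditionally means arm64 always uses the self-patching variant.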
+7 -12
arch/arm64/include/asm/el2_setup.h
···
 .Lskip_fgt2_\@:
 .endm
 
-.macro __init_el2_gcs
-	mrs_s	x1, SYS_ID_AA64PFR1_EL1
-	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
-	cbz	x1, .Lskip_gcs_\@
-
-	/* Ensure GCS is not enabled when we start trying to do BLs */
-	msr_s	SYS_GCSCR_EL1, xzr
-	msr_s	SYS_GCSCRE0_EL1, xzr
-.Lskip_gcs_\@:
-.endm
-
 /**
  * Initialize EL2 registers to sane values. This should be called early on all
  * cores that were booted in EL2. Note that everything gets initialised as
···
 	__init_el2_cptr
 	__init_el2_fgt
 	__init_el2_fgt2
-	__init_el2_gcs
 .endm
 
 #ifndef __KVM_NVHE_HYPERVISOR__
···
 	msr_s	SYS_MPAMHCR_EL2, xzr		// clear TRAP_MPAMIDR_EL1 -> EL2
 
 .Lskip_mpam_\@:
+	check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2
+
+.Linit_gcs_\@:
+	msr_s	SYS_GCSCR_EL1, xzr
+	msr_s	SYS_GCSCRE0_EL1, xzr
+
+.Lskip_gcs_\@:
 	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
 
 .Linit_sve_\@:	/* SVE register access */
+1 -2
arch/arm64/kernel/Makefile
···
 			   cpufeature.o alternative.o cacheinfo.o		\
 			   smp.o smp_spin_table.o topology.o smccc-call.o	\
 			   syscall.o proton-pack.o idle.o patching.o pi/	\
-			   rsi.o
+			   rsi.o jump_label.o
 
 obj-$(CONFIG_COMPAT)			+= sys32.o signal32.o			\
 					   sys_compat.o
···
 obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF)	+= watchdog_hld.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_CPU_PM)			+= sleep.o suspend.o
-obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
 obj-$(CONFIG_KGDB)			+= kgdb.o
 obj-$(CONFIG_EFI)			+= efi.o efi-rt-wrapper.o
 obj-$(CONFIG_PCI)			+= pci.o
+32 -25
arch/arm64/kernel/cpufeature.c
···
 }
 #endif
 
+#ifdef CONFIG_ARM64_SME
+static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope)
+{
+	return system_supports_sme() && has_user_cpuid_feature(cap, scope);
+}
+#endif
+
 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
···
 	HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
 #ifdef CONFIG_ARM64_SME
 	HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP),
-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP),
+	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4),
 #endif /* CONFIG_ARM64_SME */
 	HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT),
 	HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA),
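
The effect is visible from userspace via the auxiliary vector. A hedged sketch of how the filtered hwcaps are meant to be consumed - getauxval() and HWCAP2_SME are the standard interfaces, the program itself is illustrative:

	#include <stdio.h>
	#include <sys/auxv.h>
	#include <asm/hwcap.h>	/* arm64 HWCAP2_* bits */

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		/* After this fix, SME sub-feature hwcaps imply HWCAP2_SME itself. */
		printf("FEAT_SME %s\n",
		       (hwcap2 & HWCAP2_SME) ? "implemented" : "not implemented");
		return 0;
	}

Previously, a CPU reporting SME sub-features in ID_AA64SMFR0_EL1 while ID_AA64PFR1_EL1 said SME was not implemented could leave userspace seeing hwcaps such as SME2 without the base SME hwcap.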
+8 -3
arch/arm64/kernel/efi.c
···
 
 #include <asm/efi.h>
 #include <asm/stacktrace.h>
+#include <asm/vmap_stack.h>
 
 static bool region_is_misaligned(const efi_memory_desc_t *md)
 {
···
 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
 		return 0;
 
-	p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
-			   NUMA_NO_NODE, &&l);
-l:	if (!p) {
+	if (!IS_ENABLED(CONFIG_VMAP_STACK)) {
+		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+		return -ENOMEM;
+	}
+
+	p = arch_alloc_vmap_stack(THREAD_SIZE, NUMA_NO_NODE);
+	if (!p) {
 		pr_warn("Failed to allocate EFI runtime stack\n");
 		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 		return -ENOMEM;
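
For reference, a sketch of what arch_alloc_vmap_stack() adds over the open-coded __vmalloc_node() call, approximating arm64's <asm/vmap_stack.h> - treat this as an assumption for illustration, not a verbatim copy of the header:

	static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
	{
		void *p;

		/* Stacks must live in the vmalloc area for this to make sense. */
		BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));

		p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
				   __builtin_return_address(0));

		/* Reset the KASAN tag so ordinary stack accesses are not flagged. */
		return kasan_reset_tag(p);
	}

Allocating the EFI runtime stack through the same helper as regular vmap'd kernel stacks, KASAN tag handling included, is what silences the bogus splat.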
+5
arch/arm64/kernel/process.c
···
 	current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
 	if (current->thread.por_el0 != next->thread.por_el0) {
 		write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
+		/*
+		 * No ISB required as we can tolerate spurious Overlay faults -
+		 * the fault handler will check again based on the new value
+		 * of POR_EL0.
+		 */
 	}
 }
+1 -1
arch/arm64/kernel/smp.c
···
 void smp_send_stop(void)
 {
 	static unsigned long stop_in_progress;
-	cpumask_t mask;
+	static cpumask_t mask;
 	unsigned long timeout;
 
 	/*
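
A static mask is safe here because smp_send_stop() already serialises on stop_in_progress; the motivation is stack usage, which an illustrative userspace calculation makes concrete (NR_CPUS=8192 is an assumed config value, not one from this patch):

	#include <stdio.h>

	#define NR_CPUS 8192	/* assumption: a large distro-style config */

	/* Mirrors the shape of the kernel's cpumask_t: one bit per CPU. */
	struct cpumask_sketch {
		unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
	};

	int main(void)
	{
		printf("cpumask with NR_CPUS=%d: %zu bytes\n",
		       NR_CPUS, sizeof(struct cpumask_sketch));	/* 1024 bytes */
		return 0;
	}

A kilobyte is an unwelcome on-stack footprint for a path that runs during panic.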
+21 -9
arch/arm64/mm/fault.c
···
 	}
 }
 
-static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma,
-			unsigned int mm_flags)
+static bool fault_from_pkey(struct vm_area_struct *vma, unsigned int mm_flags)
 {
-	unsigned long iss2 = ESR_ELx_ISS2(esr);
-
 	if (!system_supports_poe())
 		return false;
 
-	if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay))
-		return true;
-
+	/*
+	 * We do not check whether an Overlay fault has occurred because we
+	 * cannot make a decision based solely on its value:
+	 *
+	 * - If Overlay is set, a fault did occur due to POE, but it may be
+	 *   spurious in those cases where we update POR_EL0 without ISB (e.g.
+	 *   on context-switch). We would then need to manually check POR_EL0
+	 *   against vma_pkey(vma), which is exactly what
+	 *   arch_vma_access_permitted() does.
+	 *
+	 * - If Overlay is not set, we may still need to report a pkey fault.
+	 *   This is the case if an access was made within a mapping but with no
+	 *   page mapped, and POR_EL0 forbids the access (according to
+	 *   vma_pkey()). Such access will result in a SIGSEGV regardless
+	 *   because core code checks arch_vma_access_permitted(), but in order
+	 *   to report the correct error code - SEGV_PKUERR - we must handle
+	 *   that case here.
+	 */
 	return !arch_vma_access_permitted(vma,
 			mm_flags & FAULT_FLAG_WRITE,
 			mm_flags & FAULT_FLAG_INSTRUCTION,
···
 		goto bad_area;
 	}
 
-	if (fault_from_pkey(esr, vma, mm_flags)) {
+	if (fault_from_pkey(vma, mm_flags)) {
 		pkey = vma_pkey(vma);
 		vma_end_read(vma);
 		fault = 0;
···
 		goto bad_area;
 	}
 
-	if (fault_from_pkey(esr, vma, mm_flags)) {
+	if (fault_from_pkey(vma, mm_flags)) {
 		pkey = vma_pkey(vma);
 		mmap_read_unlock(mm);
 		fault = 0;
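
To see the user-visible contract, a hedged userspace sketch using the generic pkey syscalls - pkey_alloc() and pkey_mprotect() are real interfaces; the scenario assumes hardware with FEAT_S1POE and a kernel with POE support:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesz = sysconf(_SC_PAGESIZE);
		char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);

		if (p == MAP_FAILED || pkey < 0)
			return 1;

		pkey_mprotect(p, pagesz, PROT_READ | PROT_WRITE, pkey);
		*p = 1;	/* should die with SIGSEGV, si_code == SEGV_PKUERR */
		return 0;
	}

With the fix, the write is judged by arch_vma_access_permitted() and reported as SEGV_PKUERR whether or not the hardware flagged the access as an Overlay fault.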
-1
arch/arm64/mm/proc.S
···
 	msr	REG_PIR_EL1, x0
 
 	orr	tcr2, tcr2, TCR2_EL1_PIE
-	msr	REG_TCR2_EL1, x0
 
 .Lskip_indirection: