Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm64-spectre-bhb-for-v5.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 spectre fixes from James Morse:
"ARM64 Spectre-BHB mitigations:

- Make EL1 vectors per-cpu

- Add mitigation sequences to the EL1 and EL2 vectors on vulnerable
CPUs

- Implement ARCH_WORKAROUND_3 for KVM guests

- Report Vulnerable when unprivileged eBPF is enabled"

* tag 'arm64-spectre-bhb-for-v5.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: proton-pack: Include unprivileged eBPF status in Spectre v2 mitigation reporting
arm64: Use the clearbhb instruction in mitigations
KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
arm64: Mitigate spectre style branch history side channels
arm64: proton-pack: Report Spectre-BHB vulnerabilities as part of Spectre-v2
arm64: Add percpu vectors for EL1
arm64: entry: Add macro for reading symbol addresses from the trampoline
arm64: entry: Add vectors that have the bhb mitigation sequences
arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
arm64: entry: Allow the trampoline text to occupy multiple pages
arm64: entry: Make the kpti trampoline's kpti sequence optional
arm64: entry: Move trampoline macros out of ifdef'd section
arm64: entry: Don't assume tramp_vectors is the start of the vectors
arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
arm64: entry: Move the trampoline data page before the text page
arm64: entry: Free up another register on kpti's tramp_exit path
arm64: entry: Make the trampoline cleanup optional
KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A
arm64: spectre: Rename spectre_v4_patch_fw_mitigation_conduit
arm64: entry.S: Add ventry overflow sanity checks

+830 -76
+9
arch/arm64/Kconfig
··· 1383 1383 1384 1384 If unsure, say Y. 1385 1385 1386 + config MITIGATE_SPECTRE_BRANCH_HISTORY 1387 + bool "Mitigate Spectre style attacks against branch history" if EXPERT 1388 + default y 1389 + help 1390 + Speculation attacks against some high-performance processors can 1391 + make use of branch history to influence future speculation. 1392 + When taking an exception from user-space, a sequence of branches 1393 + or a firmware call overwrites the branch history. 1394 + 1386 1395 config RODATA_FULL_DEFAULT_ENABLED 1387 1396 bool "Apply r/o permissions of VM areas also to their linear aliases" 1388 1397 default y
+53
arch/arm64/include/asm/assembler.h
··· 109 109 .endm 110 110 111 111 /* 112 + * Clear Branch History instruction 113 + */ 114 + .macro clearbhb 115 + hint #22 116 + .endm 117 + 118 + /* 112 119 * Speculation barrier 113 120 */ 114 121 .macro sb ··· 857 850 858 851 #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */ 859 852 853 + .macro __mitigate_spectre_bhb_loop tmp 854 + #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY 855 + alternative_cb spectre_bhb_patch_loop_iter 856 + mov \tmp, #32 // Patched to correct the immediate 857 + alternative_cb_end 858 + .Lspectre_bhb_loop\@: 859 + b . + 4 860 + subs \tmp, \tmp, #1 861 + b.ne .Lspectre_bhb_loop\@ 862 + sb 863 + #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ 864 + .endm 865 + 866 + .macro mitigate_spectre_bhb_loop tmp 867 + #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY 868 + alternative_cb spectre_bhb_patch_loop_mitigation_enable 869 + b .L_spectre_bhb_loop_done\@ // Patched to NOP 870 + alternative_cb_end 871 + __mitigate_spectre_bhb_loop \tmp 872 + .L_spectre_bhb_loop_done\@: 873 + #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ 874 + .endm 875 + 876 + /* Save/restores x0-x3 to the stack */ 877 + .macro __mitigate_spectre_bhb_fw 878 + #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY 879 + stp x0, x1, [sp, #-16]! 880 + stp x2, x3, [sp, #-16]! 881 + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 882 + alternative_cb smccc_patch_fw_mitigation_conduit 883 + nop // Patched to SMC/HVC #0 884 + alternative_cb_end 885 + ldp x2, x3, [sp], #16 886 + ldp x0, x1, [sp], #16 887 + #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ 888 + .endm 889 + 890 + .macro mitigate_spectre_bhb_clear_insn 891 + #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY 892 + alternative_cb spectre_bhb_patch_clearbhb 893 + /* Patched to NOP when not supported */ 894 + clearbhb 895 + isb 896 + alternative_cb_end 897 + #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ 898 + .endm 860 899 #endif /* __ASM_ASSEMBLER_H */
+29
arch/arm64/include/asm/cpufeature.h
··· 637 637 return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1)); 638 638 } 639 639 640 + 641 + static inline bool supports_csv2p3(int scope) 642 + { 643 + u64 pfr0; 644 + u8 csv2_val; 645 + 646 + if (scope == SCOPE_LOCAL_CPU) 647 + pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1); 648 + else 649 + pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 650 + 651 + csv2_val = cpuid_feature_extract_unsigned_field(pfr0, 652 + ID_AA64PFR0_CSV2_SHIFT); 653 + return csv2_val == 3; 654 + } 655 + 656 + static inline bool supports_clearbhb(int scope) 657 + { 658 + u64 isar2; 659 + 660 + if (scope == SCOPE_LOCAL_CPU) 661 + isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1); 662 + else 663 + isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); 664 + 665 + return cpuid_feature_extract_unsigned_field(isar2, 666 + ID_AA64ISAR2_CLEARBHB_SHIFT); 667 + } 668 + 640 669 const struct cpumask *system_32bit_el0_cpumask(void); 641 670 DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0); 642 671
+8
arch/arm64/include/asm/cputype.h
··· 73 73 #define ARM_CPU_PART_CORTEX_A76 0xD0B 74 74 #define ARM_CPU_PART_NEOVERSE_N1 0xD0C 75 75 #define ARM_CPU_PART_CORTEX_A77 0xD0D 76 + #define ARM_CPU_PART_NEOVERSE_V1 0xD40 77 + #define ARM_CPU_PART_CORTEX_A78 0xD41 78 + #define ARM_CPU_PART_CORTEX_X1 0xD44 76 79 #define ARM_CPU_PART_CORTEX_A510 0xD46 77 80 #define ARM_CPU_PART_CORTEX_A710 0xD47 78 81 #define ARM_CPU_PART_CORTEX_X2 0xD48 79 82 #define ARM_CPU_PART_NEOVERSE_N2 0xD49 83 + #define ARM_CPU_PART_CORTEX_A78C 0xD4B 80 84 81 85 #define APM_CPU_PART_POTENZA 0x000 82 86 ··· 121 117 #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) 122 118 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) 123 119 #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) 120 + #define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1) 121 + #define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) 122 + #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) 124 123 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) 125 124 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) 126 125 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) 127 126 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) 127 + #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) 128 128 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) 129 129 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) 130 130 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+4 -2
arch/arm64/include/asm/fixmap.h
··· 62 62 #endif /* CONFIG_ACPI_APEI_GHES */ 63 63 64 64 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 65 + FIX_ENTRY_TRAMP_TEXT3, 66 + FIX_ENTRY_TRAMP_TEXT2, 67 + FIX_ENTRY_TRAMP_TEXT1, 65 68 FIX_ENTRY_TRAMP_DATA, 66 - FIX_ENTRY_TRAMP_TEXT, 67 - #define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) 69 + #define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1)) 68 70 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ 69 71 __end_of_permanent_fixed_addresses, 70 72
+1
arch/arm64/include/asm/insn.h
··· 65 65 AARCH64_INSN_HINT_PSB = 0x11 << 5, 66 66 AARCH64_INSN_HINT_TSB = 0x12 << 5, 67 67 AARCH64_INSN_HINT_CSDB = 0x14 << 5, 68 + AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5, 68 69 69 70 AARCH64_INSN_HINT_BTI = 0x20 << 5, 70 71 AARCH64_INSN_HINT_BTIC = 0x22 << 5,
+5
arch/arm64/include/asm/kvm_host.h
··· 714 714 ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr(); 715 715 } 716 716 717 + static inline bool kvm_system_needs_idmapped_vectors(void) 718 + { 719 + return cpus_have_const_cap(ARM64_SPECTRE_V3A); 720 + } 721 + 717 722 void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); 718 723 719 724 static inline void kvm_arch_hardware_unsetup(void) {}
+5
arch/arm64/include/asm/sections.h
··· 23 23 extern char __entry_tramp_text_start[], __entry_tramp_text_end[]; 24 24 extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[]; 25 25 26 + static inline size_t entry_tramp_text_size(void) 27 + { 28 + return __entry_tramp_text_end - __entry_tramp_text_start; 29 + } 30 + 26 31 #endif /* __ASM_SECTIONS_H */
+4
arch/arm64/include/asm/spectre.h
··· 93 93 94 94 enum mitigation_state arm64_get_meltdown_state(void); 95 95 96 + enum mitigation_state arm64_get_spectre_bhb_state(void); 97 + bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); 98 + u8 spectre_bhb_loop_affected(int scope); 99 + void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); 96 100 #endif /* __ASSEMBLY__ */ 97 101 #endif /* __ASM_SPECTRE_H */
+2
arch/arm64/include/asm/sysreg.h
··· 773 773 #define ID_AA64ISAR1_GPI_IMP_DEF 0x1 774 774 775 775 /* id_aa64isar2 */ 776 + #define ID_AA64ISAR2_CLEARBHB_SHIFT 28 776 777 #define ID_AA64ISAR2_RPRES_SHIFT 4 777 778 #define ID_AA64ISAR2_WFXT_SHIFT 0 778 779 ··· 905 904 #endif 906 905 907 906 /* id_aa64mmfr1 */ 907 + #define ID_AA64MMFR1_ECBHB_SHIFT 60 908 908 #define ID_AA64MMFR1_AFP_SHIFT 44 909 909 #define ID_AA64MMFR1_ETS_SHIFT 36 910 910 #define ID_AA64MMFR1_TWED_SHIFT 32
+73
arch/arm64/include/asm/vectors.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2022 ARM Ltd. 4 + */ 5 + #ifndef __ASM_VECTORS_H 6 + #define __ASM_VECTORS_H 7 + 8 + #include <linux/bug.h> 9 + #include <linux/percpu.h> 10 + 11 + #include <asm/fixmap.h> 12 + 13 + extern char vectors[]; 14 + extern char tramp_vectors[]; 15 + extern char __bp_harden_el1_vectors[]; 16 + 17 + /* 18 + * Note: the order of this enum corresponds to two arrays in entry.S: 19 + * tramp_vecs and __bp_harden_el1_vectors. By default the canonical 20 + * 'full fat' vectors are used directly. 21 + */ 22 + enum arm64_bp_harden_el1_vectors { 23 + #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY 24 + /* 25 + * Perform the BHB loop mitigation, before branching to the canonical 26 + * vectors. 27 + */ 28 + EL1_VECTOR_BHB_LOOP, 29 + 30 + /* 31 + * Make the SMC call for firmware mitigation, before branching to the 32 + * canonical vectors. 33 + */ 34 + EL1_VECTOR_BHB_FW, 35 + 36 + /* 37 + * Use the ClearBHB instruction, before branching to the canonical 38 + * vectors. 39 + */ 40 + EL1_VECTOR_BHB_CLEAR_INSN, 41 + #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ 42 + 43 + /* 44 + * Remap the kernel before branching to the canonical vectors. 45 + */ 46 + EL1_VECTOR_KPTI, 47 + }; 48 + 49 + #ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY 50 + #define EL1_VECTOR_BHB_LOOP -1 51 + #define EL1_VECTOR_BHB_FW -1 52 + #define EL1_VECTOR_BHB_CLEAR_INSN -1 53 + #endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ 54 + 55 + /* The vectors to use on return from EL0. e.g. 
to remap the kernel */ 56 + DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector); 57 + 58 + #ifndef CONFIG_UNMAP_KERNEL_AT_EL0 59 + #define TRAMP_VALIAS 0 60 + #endif 61 + 62 + static inline const char * 63 + arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot) 64 + { 65 + if (arm64_kernel_unmapped_at_el0()) 66 + return (char *)TRAMP_VALIAS + SZ_2K * slot; 67 + 68 + WARN_ON_ONCE(slot == EL1_VECTOR_KPTI); 69 + 70 + return __bp_harden_el1_vectors + SZ_2K * slot; 71 + } 72 + 73 + #endif /* __ASM_VECTORS_H */
+5
arch/arm64/include/uapi/asm/kvm.h
··· 281 281 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3 282 282 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4) 283 283 284 + #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3) 285 + #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0 286 + #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1 287 + #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2 288 + 284 289 /* SVE registers */ 285 290 #define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) 286 291
+7
arch/arm64/kernel/cpu_errata.c
··· 502 502 .matches = has_spectre_v4, 503 503 .cpu_enable = spectre_v4_enable_mitigation, 504 504 }, 505 + { 506 + .desc = "Spectre-BHB", 507 + .capability = ARM64_SPECTRE_BHB, 508 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, 509 + .matches = is_spectre_bhb_affected, 510 + .cpu_enable = spectre_bhb_enable_mitigation, 511 + }, 505 512 #ifdef CONFIG_ARM64_ERRATUM_1418040 506 513 { 507 514 .desc = "ARM erratum 1418040",
+12
arch/arm64/kernel/cpufeature.c
··· 73 73 #include <linux/mm.h> 74 74 #include <linux/cpu.h> 75 75 #include <linux/kasan.h> 76 + #include <linux/percpu.h> 77 + 76 78 #include <asm/cpu.h> 77 79 #include <asm/cpufeature.h> 78 80 #include <asm/cpu_ops.h> ··· 87 85 #include <asm/smp.h> 88 86 #include <asm/sysreg.h> 89 87 #include <asm/traps.h> 88 + #include <asm/vectors.h> 90 89 #include <asm/virt.h> 91 90 92 91 /* Kernel representation of AT_HWCAP and AT_HWCAP2 */ ··· 112 109 113 110 bool arm64_use_ng_mappings = false; 114 111 EXPORT_SYMBOL(arm64_use_ng_mappings); 112 + 113 + DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors; 115 114 116 115 /* 117 116 * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs ··· 231 226 }; 232 227 233 228 static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { 229 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0), 234 230 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0), 235 231 ARM64_FTR_END, 236 232 }; ··· 1595 1589 kpti_remap_fn *remap_fn; 1596 1590 1597 1591 int cpu = smp_processor_id(); 1592 + 1593 + if (__this_cpu_read(this_cpu_vector) == vectors) { 1594 + const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI); 1595 + 1596 + __this_cpu_write(this_cpu_vector, v); 1597 + } 1598 1598 1599 1599 /* 1600 1600 * We don't need to rewrite the page-tables if either we've done
+157 -57
arch/arm64/kernel/entry.S
··· 37 37 38 38 .macro kernel_ventry, el:req, ht:req, regsize:req, label:req 39 39 .align 7 40 - #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 40 + .Lventry_start\@: 41 41 .if \el == 0 42 - alternative_if ARM64_UNMAP_KERNEL_AT_EL0 42 + /* 43 + * This must be the first instruction of the EL0 vector entries. It is 44 + * skipped by the trampoline vectors, to trigger the cleanup. 45 + */ 46 + b .Lskip_tramp_vectors_cleanup\@ 43 47 .if \regsize == 64 44 48 mrs x30, tpidrro_el0 45 49 msr tpidrro_el0, xzr 46 50 .else 47 51 mov x30, xzr 48 52 .endif 49 - alternative_else_nop_endif 53 + .Lskip_tramp_vectors_cleanup\@: 50 54 .endif 51 - #endif 52 55 53 56 sub sp, sp, #PT_REGS_SIZE 54 57 #ifdef CONFIG_VMAP_STACK ··· 98 95 mrs x0, tpidrro_el0 99 96 #endif 100 97 b el\el\ht\()_\regsize\()_\label 98 + .org .Lventry_start\@ + 128 // Did we overflow the ventry slot? 101 99 .endm 102 100 103 - .macro tramp_alias, dst, sym 101 + .macro tramp_alias, dst, sym, tmp 104 102 mov_q \dst, TRAMP_VALIAS 105 - add \dst, \dst, #(\sym - .entry.tramp.text) 103 + adr_l \tmp, \sym 104 + add \dst, \dst, \tmp 105 + adr_l \tmp, .entry.tramp.text 106 + sub \dst, \dst, \tmp 106 107 .endm 107 108 108 109 /* ··· 123 116 tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ 124 117 mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 125 118 mov w1, #\state 126 - alternative_cb spectre_v4_patch_fw_mitigation_conduit 119 + alternative_cb smccc_patch_fw_mitigation_conduit 127 120 nop // Patched to SMC/HVC #0 128 121 alternative_cb_end 129 122 .L__asm_ssbd_skip\@: ··· 420 413 ldp x24, x25, [sp, #16 * 12] 421 414 ldp x26, x27, [sp, #16 * 13] 422 415 ldp x28, x29, [sp, #16 * 14] 423 - ldr lr, [sp, #S_LR] 424 - add sp, sp, #PT_REGS_SIZE // restore sp 425 416 426 417 .if \el == 0 427 - alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 418 + alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 419 + ldr lr, [sp, #S_LR] 420 + add sp, sp, #PT_REGS_SIZE // restore sp 421 + eret 422 + alternative_else_nop_endif 428 423 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 429 
424 bne 4f 430 - msr far_el1, x30 431 - tramp_alias x30, tramp_exit_native 425 + msr far_el1, x29 426 + tramp_alias x30, tramp_exit_native, x29 432 427 br x30 433 428 4: 434 - tramp_alias x30, tramp_exit_compat 429 + tramp_alias x30, tramp_exit_compat, x29 435 430 br x30 436 431 #endif 437 432 .else 433 + ldr lr, [sp, #S_LR] 434 + add sp, sp, #PT_REGS_SIZE // restore sp 435 + 438 436 /* Ensure any device/NC reads complete */ 439 437 alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412 440 438 ··· 606 594 607 595 .popsection // .entry.text 608 596 609 - #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 610 - /* 611 - * Exception vectors trampoline. 612 - */ 613 - .pushsection ".entry.tramp.text", "ax" 614 - 615 597 // Move from tramp_pg_dir to swapper_pg_dir 616 598 .macro tramp_map_kernel, tmp 617 599 mrs \tmp, ttbr1_el1 ··· 639 633 */ 640 634 .endm 641 635 642 - .macro tramp_ventry, regsize = 64 636 + .macro tramp_data_page dst 637 + adr_l \dst, .entry.tramp.text 638 + sub \dst, \dst, PAGE_SIZE 639 + .endm 640 + 641 + .macro tramp_data_read_var dst, var 642 + #ifdef CONFIG_RANDOMIZE_BASE 643 + tramp_data_page \dst 644 + add \dst, \dst, #:lo12:__entry_tramp_data_\var 645 + ldr \dst, [\dst] 646 + #else 647 + ldr \dst, =\var 648 + #endif 649 + .endm 650 + 651 + #define BHB_MITIGATION_NONE 0 652 + #define BHB_MITIGATION_LOOP 1 653 + #define BHB_MITIGATION_FW 2 654 + #define BHB_MITIGATION_INSN 3 655 + 656 + .macro tramp_ventry, vector_start, regsize, kpti, bhb 643 657 .align 7 644 658 1: 645 659 .if \regsize == 64 646 660 msr tpidrro_el0, x30 // Restored in kernel_ventry 647 661 .endif 662 + 663 + .if \bhb == BHB_MITIGATION_LOOP 664 + /* 665 + * This sequence must appear before the first indirect branch. i.e. the 666 + * ret out of tramp_ventry. It appears here because x30 is free. 
667 + */ 668 + __mitigate_spectre_bhb_loop x30 669 + .endif // \bhb == BHB_MITIGATION_LOOP 670 + 671 + .if \bhb == BHB_MITIGATION_INSN 672 + clearbhb 673 + isb 674 + .endif // \bhb == BHB_MITIGATION_INSN 675 + 676 + .if \kpti == 1 648 677 /* 649 678 * Defend against branch aliasing attacks by pushing a dummy 650 679 * entry onto the return stack and using a RET instruction to ··· 689 648 b . 690 649 2: 691 650 tramp_map_kernel x30 692 - #ifdef CONFIG_RANDOMIZE_BASE 693 - adr x30, tramp_vectors + PAGE_SIZE 694 651 alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 695 - ldr x30, [x30] 696 - #else 697 - ldr x30, =vectors 698 - #endif 652 + tramp_data_read_var x30, vectors 699 653 alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 700 - prfm plil1strm, [x30, #(1b - tramp_vectors)] 654 + prfm plil1strm, [x30, #(1b - \vector_start)] 701 655 alternative_else_nop_endif 656 + 702 657 msr vbar_el1, x30 703 - add x30, x30, #(1b - tramp_vectors) 704 658 isb 659 + .else 660 + ldr x30, =vectors 661 + .endif // \kpti == 1 662 + 663 + .if \bhb == BHB_MITIGATION_FW 664 + /* 665 + * The firmware sequence must appear before the first indirect branch. 666 + * i.e. the ret out of tramp_ventry. But it also needs the stack to be 667 + * mapped to save/restore the registers the SMC clobbers. 668 + */ 669 + __mitigate_spectre_bhb_fw 670 + .endif // \bhb == BHB_MITIGATION_FW 671 + 672 + add x30, x30, #(1b - \vector_start + 4) 705 673 ret 674 + .org 1b + 128 // Did we overflow the ventry slot? 
706 675 .endm 707 676 708 677 .macro tramp_exit, regsize = 64 709 - adr x30, tramp_vectors 678 + tramp_data_read_var x30, this_cpu_vector 679 + get_this_cpu_offset x29 680 + ldr x30, [x30, x29] 681 + 710 682 msr vbar_el1, x30 711 - tramp_unmap_kernel x30 683 + ldr lr, [sp, #S_LR] 684 + tramp_unmap_kernel x29 712 685 .if \regsize == 64 713 - mrs x30, far_el1 686 + mrs x29, far_el1 714 687 .endif 688 + add sp, sp, #PT_REGS_SIZE // restore sp 715 689 eret 716 690 sb 717 691 .endm 718 692 719 - .align 11 720 - SYM_CODE_START_NOALIGN(tramp_vectors) 693 + .macro generate_tramp_vector, kpti, bhb 694 + .Lvector_start\@: 721 695 .space 0x400 722 696 723 - tramp_ventry 724 - tramp_ventry 725 - tramp_ventry 726 - tramp_ventry 697 + .rept 4 698 + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb 699 + .endr 700 + .rept 4 701 + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb 702 + .endr 703 + .endm 727 704 728 - tramp_ventry 32 729 - tramp_ventry 32 730 - tramp_ventry 32 731 - tramp_ventry 32 705 + #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 706 + /* 707 + * Exception vectors trampoline. 708 + * The order must match __bp_harden_el1_vectors and the 709 + * arm64_bp_harden_el1_vectors enum. 
710 + */ 711 + .pushsection ".entry.tramp.text", "ax" 712 + .align 11 713 + SYM_CODE_START_NOALIGN(tramp_vectors) 714 + #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY 715 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP 716 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW 717 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN 718 + #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ 719 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE 732 720 SYM_CODE_END(tramp_vectors) 733 721 734 722 SYM_CODE_START(tramp_exit_native) ··· 774 704 .pushsection ".rodata", "a" 775 705 .align PAGE_SHIFT 776 706 SYM_DATA_START(__entry_tramp_data_start) 707 + __entry_tramp_data_vectors: 777 708 .quad vectors 709 + #ifdef CONFIG_ARM_SDE_INTERFACE 710 + __entry_tramp_data___sdei_asm_handler: 711 + .quad __sdei_asm_handler 712 + #endif /* CONFIG_ARM_SDE_INTERFACE */ 713 + __entry_tramp_data_this_cpu_vector: 714 + .quad this_cpu_vector 778 715 SYM_DATA_END(__entry_tramp_data_start) 779 716 .popsection // .rodata 780 717 #endif /* CONFIG_RANDOMIZE_BASE */ 781 718 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ 719 + 720 + /* 721 + * Exception vectors for spectre mitigations on entry from EL1 when 722 + * kpti is not in use. 723 + */ 724 + .macro generate_el1_vector, bhb 725 + .Lvector_start\@: 726 + kernel_ventry 1, t, 64, sync // Synchronous EL1t 727 + kernel_ventry 1, t, 64, irq // IRQ EL1t 728 + kernel_ventry 1, t, 64, fiq // FIQ EL1h 729 + kernel_ventry 1, t, 64, error // Error EL1t 730 + 731 + kernel_ventry 1, h, 64, sync // Synchronous EL1h 732 + kernel_ventry 1, h, 64, irq // IRQ EL1h 733 + kernel_ventry 1, h, 64, fiq // FIQ EL1h 734 + kernel_ventry 1, h, 64, error // Error EL1h 735 + 736 + .rept 4 737 + tramp_ventry .Lvector_start\@, 64, 0, \bhb 738 + .endr 739 + .rept 4 740 + tramp_ventry .Lvector_start\@, 32, 0, \bhb 741 + .endr 742 + .endm 743 + 744 + /* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. 
*/ 745 + .pushsection ".entry.text", "ax" 746 + .align 11 747 + SYM_CODE_START(__bp_harden_el1_vectors) 748 + #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY 749 + generate_el1_vector bhb=BHB_MITIGATION_LOOP 750 + generate_el1_vector bhb=BHB_MITIGATION_FW 751 + generate_el1_vector bhb=BHB_MITIGATION_INSN 752 + #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ 753 + SYM_CODE_END(__bp_harden_el1_vectors) 754 + .popsection 755 + 782 756 783 757 /* 784 758 * Register switch for AArch64. The callee-saved registers need to be saved ··· 949 835 * Remember whether to unmap the kernel on exit. 950 836 */ 951 837 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] 952 - 953 - #ifdef CONFIG_RANDOMIZE_BASE 954 - adr x4, tramp_vectors + PAGE_SIZE 955 - add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler 956 - ldr x4, [x4] 957 - #else 958 - ldr x4, =__sdei_asm_handler 959 - #endif 838 + tramp_data_read_var x4, __sdei_asm_handler 960 839 br x4 961 840 SYM_CODE_END(__sdei_asm_entry_trampoline) 962 841 NOKPROBE(__sdei_asm_entry_trampoline) ··· 972 865 NOKPROBE(__sdei_asm_exit_trampoline) 973 866 .ltorg 974 867 .popsection // .entry.tramp.text 975 - #ifdef CONFIG_RANDOMIZE_BASE 976 - .pushsection ".rodata", "a" 977 - SYM_DATA_START(__sdei_asm_trampoline_next_handler) 978 - .quad __sdei_asm_handler 979 - SYM_DATA_END(__sdei_asm_trampoline_next_handler) 980 - .popsection // .rodata 981 - #endif /* CONFIG_RANDOMIZE_BASE */ 982 868 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ 983 869 984 870 /* ··· 1081 981 alternative_else_nop_endif 1082 982 1083 983 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 1084 - tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline 984 + tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3 1085 985 br x5 1086 986 #endif 1087 987 SYM_CODE_END(__sdei_asm_handler)
+4
arch/arm64/kernel/image-vars.h
··· 66 66 KVM_NVHE_ALIAS(kvm_update_va_mask); 67 67 KVM_NVHE_ALIAS(kvm_get_kimage_voffset); 68 68 KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0); 69 + KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter); 70 + KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable); 71 + KVM_NVHE_ALIAS(spectre_bhb_patch_wa3); 72 + KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb); 69 73 70 74 /* Global kernel state accessed by nVHE hyp code. */ 71 75 KVM_NVHE_ALIAS(kvm_vgic_global_state);
+386 -5
arch/arm64/kernel/proton-pack.c
··· 18 18 */ 19 19 20 20 #include <linux/arm-smccc.h> 21 + #include <linux/bpf.h> 21 22 #include <linux/cpu.h> 22 23 #include <linux/device.h> 23 24 #include <linux/nospec.h> 24 25 #include <linux/prctl.h> 25 26 #include <linux/sched/task_stack.h> 26 27 28 + #include <asm/debug-monitors.h> 27 29 #include <asm/insn.h> 28 30 #include <asm/spectre.h> 29 31 #include <asm/traps.h> 32 + #include <asm/vectors.h> 30 33 #include <asm/virt.h> 31 34 32 35 /* ··· 99 96 return ret; 100 97 } 101 98 99 + static const char *get_bhb_affected_string(enum mitigation_state bhb_state) 100 + { 101 + switch (bhb_state) { 102 + case SPECTRE_UNAFFECTED: 103 + return ""; 104 + default: 105 + case SPECTRE_VULNERABLE: 106 + return ", but not BHB"; 107 + case SPECTRE_MITIGATED: 108 + return ", BHB"; 109 + } 110 + } 111 + 112 + static bool _unprivileged_ebpf_enabled(void) 113 + { 114 + #ifdef CONFIG_BPF_SYSCALL 115 + return !sysctl_unprivileged_bpf_disabled; 116 + #else 117 + return false; 118 + #endif 119 + } 120 + 102 121 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, 103 122 char *buf) 104 123 { 124 + enum mitigation_state bhb_state = arm64_get_spectre_bhb_state(); 125 + const char *bhb_str = get_bhb_affected_string(bhb_state); 126 + const char *v2_str = "Branch predictor hardening"; 127 + 105 128 switch (spectre_v2_state) { 106 129 case SPECTRE_UNAFFECTED: 107 - return sprintf(buf, "Not affected\n"); 130 + if (bhb_state == SPECTRE_UNAFFECTED) 131 + return sprintf(buf, "Not affected\n"); 132 + 133 + /* 134 + * Platforms affected by Spectre-BHB can't report 135 + * "Not affected" for Spectre-v2. 
136 + */ 137 + v2_str = "CSV2"; 138 + fallthrough; 108 139 case SPECTRE_MITIGATED: 109 - return sprintf(buf, "Mitigation: Branch predictor hardening\n"); 140 + if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled()) 141 + return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); 142 + 143 + return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str); 110 144 case SPECTRE_VULNERABLE: 111 145 fallthrough; 112 146 default: ··· 594 554 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction 595 555 * to call into firmware to adjust the mitigation state. 596 556 */ 597 - void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt, 598 - __le32 *origptr, 599 - __le32 *updptr, int nr_inst) 557 + void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, 558 + __le32 *origptr, 559 + __le32 *updptr, int nr_inst) 600 560 { 601 561 u32 insn; 602 562 ··· 810 770 return -ENODEV; 811 771 } 812 772 } 773 + 774 + /* 775 + * Spectre BHB. 776 + * 777 + * A CPU is either: 778 + * - Mitigated by a branchy loop a CPU specific number of times, and listed 779 + * in our "loop mitigated list". 780 + * - Mitigated in software by the firmware Spectre v2 call. 781 + * - Has the ClearBHB instruction to perform the mitigation. 782 + * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no 783 + * software mitigation in the vectors is needed. 784 + * - Has CSV2.3, so is unaffected. 785 + */ 786 + static enum mitigation_state spectre_bhb_state; 787 + 788 + enum mitigation_state arm64_get_spectre_bhb_state(void) 789 + { 790 + return spectre_bhb_state; 791 + } 792 + 793 + enum bhb_mitigation_bits { 794 + BHB_LOOP, 795 + BHB_FW, 796 + BHB_HW, 797 + BHB_INSN, 798 + }; 799 + static unsigned long system_bhb_mitigations; 800 + 801 + /* 802 + * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any 803 + * SCOPE_SYSTEM call will give the right answer. 
804 + */ 805 + u8 spectre_bhb_loop_affected(int scope) 806 + { 807 + u8 k = 0; 808 + static u8 max_bhb_k; 809 + 810 + if (scope == SCOPE_LOCAL_CPU) { 811 + static const struct midr_range spectre_bhb_k32_list[] = { 812 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), 813 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), 814 + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), 815 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), 816 + MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), 817 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), 818 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), 819 + {}, 820 + }; 821 + static const struct midr_range spectre_bhb_k24_list[] = { 822 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), 823 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), 824 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), 825 + {}, 826 + }; 827 + static const struct midr_range spectre_bhb_k8_list[] = { 828 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), 829 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), 830 + {}, 831 + }; 832 + 833 + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) 834 + k = 32; 835 + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) 836 + k = 24; 837 + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) 838 + k = 8; 839 + 840 + max_bhb_k = max(max_bhb_k, k); 841 + } else { 842 + k = max_bhb_k; 843 + } 844 + 845 + return k; 846 + } 847 + 848 + static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void) 849 + { 850 + int ret; 851 + struct arm_smccc_res res; 852 + 853 + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, 854 + ARM_SMCCC_ARCH_WORKAROUND_3, &res); 855 + 856 + ret = res.a0; 857 + switch (ret) { 858 + case SMCCC_RET_SUCCESS: 859 + return SPECTRE_MITIGATED; 860 + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: 861 + return SPECTRE_UNAFFECTED; 862 + default: 863 + fallthrough; 864 + case SMCCC_RET_NOT_SUPPORTED: 865 + return SPECTRE_VULNERABLE; 866 + } 867 + } 868 + 869 + static bool is_spectre_bhb_fw_affected(int scope) 870 + { 871 + static bool system_affected; 872 + enum mitigation_state fw_state; 
873 + bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE; 874 + static const struct midr_range spectre_bhb_firmware_mitigated_list[] = { 875 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 876 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), 877 + {}, 878 + }; 879 + bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(), 880 + spectre_bhb_firmware_mitigated_list); 881 + 882 + if (scope != SCOPE_LOCAL_CPU) 883 + return system_affected; 884 + 885 + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); 886 + if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) { 887 + system_affected = true; 888 + return true; 889 + } 890 + 891 + return false; 892 + } 893 + 894 + static bool supports_ecbhb(int scope) 895 + { 896 + u64 mmfr1; 897 + 898 + if (scope == SCOPE_LOCAL_CPU) 899 + mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1); 900 + else 901 + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); 902 + 903 + return cpuid_feature_extract_unsigned_field(mmfr1, 904 + ID_AA64MMFR1_ECBHB_SHIFT); 905 + } 906 + 907 + bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, 908 + int scope) 909 + { 910 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 911 + 912 + if (supports_csv2p3(scope)) 913 + return false; 914 + 915 + if (supports_clearbhb(scope)) 916 + return true; 917 + 918 + if (spectre_bhb_loop_affected(scope)) 919 + return true; 920 + 921 + if (is_spectre_bhb_fw_affected(scope)) 922 + return true; 923 + 924 + return false; 925 + } 926 + 927 + static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) 928 + { 929 + const char *v = arm64_get_bp_hardening_vector(slot); 930 + 931 + if (slot < 0) 932 + return; 933 + 934 + __this_cpu_write(this_cpu_vector, v); 935 + 936 + /* 937 + * When KPTI is in use, the vectors are switched when exiting to 938 + * user-space. 
939 + */ 940 + if (arm64_kernel_unmapped_at_el0()) 941 + return; 942 + 943 + write_sysreg(v, vbar_el1); 944 + isb(); 945 + } 946 + 947 + void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) 948 + { 949 + bp_hardening_cb_t cpu_cb; 950 + enum mitigation_state fw_state, state = SPECTRE_VULNERABLE; 951 + struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); 952 + 953 + if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU)) 954 + return; 955 + 956 + if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) { 957 + /* No point mitigating Spectre-BHB alone. */ 958 + } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) { 959 + pr_info_once("spectre-bhb mitigation disabled by compile time option\n"); 960 + } else if (cpu_mitigations_off()) { 961 + pr_info_once("spectre-bhb mitigation disabled by command line option\n"); 962 + } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) { 963 + state = SPECTRE_MITIGATED; 964 + set_bit(BHB_HW, &system_bhb_mitigations); 965 + } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) { 966 + /* 967 + * Ensure KVM uses the indirect vector which will have ClearBHB 968 + * added. 969 + */ 970 + if (!data->slot) 971 + data->slot = HYP_VECTOR_INDIRECT; 972 + 973 + this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN); 974 + state = SPECTRE_MITIGATED; 975 + set_bit(BHB_INSN, &system_bhb_mitigations); 976 + } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) { 977 + /* 978 + * Ensure KVM uses the indirect vector which will have the 979 + * branchy-loop added. A57/A72-r0 will already have selected 980 + * the spectre-indirect vector, which is sufficient for BHB 981 + * too. 
982 + */ 983 + if (!data->slot) 984 + data->slot = HYP_VECTOR_INDIRECT; 985 + 986 + this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP); 987 + state = SPECTRE_MITIGATED; 988 + set_bit(BHB_LOOP, &system_bhb_mitigations); 989 + } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) { 990 + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); 991 + if (fw_state == SPECTRE_MITIGATED) { 992 + /* 993 + * Ensure KVM uses one of the spectre bp_hardening 994 + * vectors. The indirect vector doesn't include the EL3 995 + * call, so needs upgrading to 996 + * HYP_VECTOR_SPECTRE_INDIRECT. 997 + */ 998 + if (!data->slot || data->slot == HYP_VECTOR_INDIRECT) 999 + data->slot += 1; 1000 + 1001 + this_cpu_set_vectors(EL1_VECTOR_BHB_FW); 1002 + 1003 + /* 1004 + * The WA3 call in the vectors supersedes the WA1 call 1005 + * made during context-switch. Uninstall any firmware 1006 + * bp_hardening callback. 1007 + */ 1008 + cpu_cb = spectre_v2_get_sw_mitigation_cb(); 1009 + if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb) 1010 + __this_cpu_write(bp_hardening_data.fn, NULL); 1011 + 1012 + state = SPECTRE_MITIGATED; 1013 + set_bit(BHB_FW, &system_bhb_mitigations); 1014 + } 1015 + } 1016 + 1017 + update_mitigation_state(&spectre_bhb_state, state); 1018 + } 1019 + 1020 + /* Patched to NOP when enabled */ 1021 + void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, 1022 + __le32 *origptr, 1023 + __le32 *updptr, int nr_inst) 1024 + { 1025 + BUG_ON(nr_inst != 1); 1026 + 1027 + if (test_bit(BHB_LOOP, &system_bhb_mitigations)) 1028 + *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); 1029 + } 1030 + 1031 + /* Patched to NOP when enabled */ 1032 + void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, 1033 + __le32 *origptr, 1034 + __le32 *updptr, int nr_inst) 1035 + { 1036 + BUG_ON(nr_inst != 1); 1037 + 1038 + if (test_bit(BHB_FW, &system_bhb_mitigations)) 1039 + *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); 1040 + } 1041 + 1042 + /* Patched to correct 
the immediate */ 1043 + void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt, 1044 + __le32 *origptr, __le32 *updptr, int nr_inst) 1045 + { 1046 + u8 rd; 1047 + u32 insn; 1048 + u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM); 1049 + 1050 + BUG_ON(nr_inst != 1); /* MOV -> MOV */ 1051 + 1052 + if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) 1053 + return; 1054 + 1055 + insn = le32_to_cpu(*origptr); 1056 + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); 1057 + insn = aarch64_insn_gen_movewide(rd, loop_count, 0, 1058 + AARCH64_INSN_VARIANT_64BIT, 1059 + AARCH64_INSN_MOVEWIDE_ZERO); 1060 + *updptr++ = cpu_to_le32(insn); 1061 + } 1062 + 1063 + /* Patched to mov WA3 when supported */ 1064 + void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt, 1065 + __le32 *origptr, __le32 *updptr, int nr_inst) 1066 + { 1067 + u8 rd; 1068 + u32 insn; 1069 + 1070 + BUG_ON(nr_inst != 1); /* MOV -> MOV */ 1071 + 1072 + if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) || 1073 + !test_bit(BHB_FW, &system_bhb_mitigations)) 1074 + return; 1075 + 1076 + insn = le32_to_cpu(*origptr); 1077 + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); 1078 + 1079 + insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR, 1080 + AARCH64_INSN_VARIANT_32BIT, 1081 + AARCH64_INSN_REG_ZR, rd, 1082 + ARM_SMCCC_ARCH_WORKAROUND_3); 1083 + if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT)) 1084 + return; 1085 + 1086 + *updptr++ = cpu_to_le32(insn); 1087 + } 1088 + 1089 + /* Patched to NOP when not supported */ 1090 + void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt, 1091 + __le32 *origptr, __le32 *updptr, int nr_inst) 1092 + { 1093 + BUG_ON(nr_inst != 2); 1094 + 1095 + if (test_bit(BHB_INSN, &system_bhb_mitigations)) 1096 + return; 1097 + 1098 + *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); 1099 + *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); 1100 + } 1101 + 1102 + #ifdef CONFIG_BPF_SYSCALL 1103 + #define EBPF_WARN 
"Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n" 1104 + void unpriv_ebpf_notify(int new_state) 1105 + { 1106 + if (spectre_v2_state == SPECTRE_VULNERABLE || 1107 + spectre_bhb_state != SPECTRE_MITIGATED) 1108 + return; 1109 + 1110 + if (!new_state) 1111 + pr_err("WARNING: %s", EBPF_WARN); 1112 + } 1113 + #endif
+1 -1
arch/arm64/kernel/vmlinux.lds.S
··· 341 341 <= SZ_4K, "Hibernate exit text too big or misaligned") 342 342 #endif 343 343 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 344 - ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, 344 + ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, 345 345 "Entry trampoline text too big") 346 346 #endif 347 347 #ifdef CONFIG_KVM
+1 -4
arch/arm64/kvm/arm.c
··· 1491 1491 base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); 1492 1492 kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT); 1493 1493 1494 - if (!cpus_have_const_cap(ARM64_SPECTRE_V3A)) 1495 - return 0; 1496 - 1497 - if (!has_vhe()) { 1494 + if (kvm_system_needs_idmapped_vectors() && !has_vhe()) { 1498 1495 err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs), 1499 1496 __BP_HARDEN_HYP_VECS_SZ, &base); 1500 1497 if (err)
+9
arch/arm64/kvm/hyp/hyp-entry.S
··· 62 62 /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ 63 63 eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ 64 64 ARM_SMCCC_ARCH_WORKAROUND_2) 65 + cbz w1, wa_epilogue 66 + 67 + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \ 68 + ARM_SMCCC_ARCH_WORKAROUND_3) 65 69 cbnz w1, el1_trap 66 70 67 71 wa_epilogue: ··· 196 192 sub sp, sp, #(8 * 4) 197 193 stp x2, x3, [sp, #(8 * 0)] 198 194 stp x0, x1, [sp, #(8 * 2)] 195 + alternative_cb spectre_bhb_patch_wa3 196 + /* Patched to mov WA3 when supported */ 199 197 mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 198 + alternative_cb_end 200 199 smc #0 201 200 ldp x2, x3, [sp, #(8 * 0)] 202 201 add sp, sp, #(8 * 2) ··· 212 205 spectrev2_smccc_wa1_smc 213 206 .else 214 207 stp x0, x1, [sp, #-16]! 208 + mitigate_spectre_bhb_loop x0 209 + mitigate_spectre_bhb_clear_insn 215 210 .endif 216 211 .if \indirect != 0 217 212 alternative_cb kvm_patch_vector_branch
+3 -1
arch/arm64/kvm/hyp/nvhe/mm.c
··· 148 148 phys_addr_t phys; 149 149 void *bp_base; 150 150 151 - if (!cpus_have_const_cap(ARM64_SPECTRE_V3A)) 151 + if (!kvm_system_needs_idmapped_vectors()) { 152 + __hyp_bp_vect_base = __bp_harden_hyp_vecs; 152 153 return 0; 154 + } 153 155 154 156 phys = __hyp_pa(__bp_harden_hyp_vecs); 155 157 bp_base = (void *)__pkvm_create_private_mapping(phys,
+8 -2
arch/arm64/kvm/hyp/vhe/switch.c
··· 10 10 #include <linux/kvm_host.h> 11 11 #include <linux/types.h> 12 12 #include <linux/jump_label.h> 13 + #include <linux/percpu.h> 13 14 #include <uapi/linux/psci.h> 14 15 15 16 #include <kvm/arm_psci.h> ··· 25 24 #include <asm/fpsimd.h> 26 25 #include <asm/debug-monitors.h> 27 26 #include <asm/processor.h> 27 + #include <asm/thread_info.h> 28 + #include <asm/vectors.h> 28 29 29 30 /* VHE specific context */ 30 31 DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); ··· 70 67 71 68 static void __deactivate_traps(struct kvm_vcpu *vcpu) 72 69 { 73 - extern char vectors[]; /* kernel exception vectors */ 70 + const char *host_vectors = vectors; 74 71 75 72 ___deactivate_traps(vcpu); 76 73 ··· 84 81 asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); 85 82 86 83 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); 87 - write_sysreg(vectors, vbar_el1); 84 + 85 + if (!arm64_kernel_unmapped_at_el0()) 86 + host_vectors = __this_cpu_read(this_cpu_vector); 87 + write_sysreg(host_vectors, vbar_el1); 88 88 } 89 89 NOKPROBE_SYMBOL(__deactivate_traps); 90 90
+12
arch/arm64/kvm/hypercalls.c
··· 107 107 break; 108 108 } 109 109 break; 110 + case ARM_SMCCC_ARCH_WORKAROUND_3: 111 + switch (arm64_get_spectre_bhb_state()) { 112 + case SPECTRE_VULNERABLE: 113 + break; 114 + case SPECTRE_MITIGATED: 115 + val[0] = SMCCC_RET_SUCCESS; 116 + break; 117 + case SPECTRE_UNAFFECTED: 118 + val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; 119 + break; 120 + } 121 + break; 110 122 case ARM_SMCCC_HV_PV_TIME_FEATURES: 111 123 val[0] = SMCCC_RET_SUCCESS; 112 124 break;
+17 -1
arch/arm64/kvm/psci.c
··· 405 405 406 406 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu) 407 407 { 408 - return 3; /* PSCI version and two workaround registers */ 408 + return 4; /* PSCI version and three workaround registers */ 409 409 } 410 410 411 411 int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) ··· 417 417 return -EFAULT; 418 418 419 419 if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++)) 420 + return -EFAULT; 421 + 422 + if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++)) 420 423 return -EFAULT; 421 424 422 425 return 0; ··· 461 458 case SPECTRE_VULNERABLE: 462 459 return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL; 463 460 } 461 + break; 462 + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: 463 + switch (arm64_get_spectre_bhb_state()) { 464 + case SPECTRE_VULNERABLE: 465 + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL; 466 + case SPECTRE_MITIGATED: 467 + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL; 468 + case SPECTRE_UNAFFECTED: 469 + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED; 470 + } 471 + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL; 464 472 } 465 473 466 474 return -EINVAL; ··· 488 474 break; 489 475 case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: 490 476 case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2: 477 + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: 491 478 val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK; 492 479 break; 493 480 default: ··· 534 519 } 535 520 536 521 case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: 522 + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: 537 523 if (val & ~KVM_REG_FEATURE_LEVEL_MASK) 538 524 return -EINVAL; 539 525
+9 -3
arch/arm64/mm/mmu.c
··· 617 617 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 618 618 static int __init map_entry_trampoline(void) 619 619 { 620 + int i; 621 + 620 622 pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; 621 623 phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); 622 624 ··· 627 625 628 626 /* Map only the text into the trampoline page table */ 629 627 memset(tramp_pg_dir, 0, PGD_SIZE); 630 - __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, 631 - prot, __pgd_pgtable_alloc, 0); 628 + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, 629 + entry_tramp_text_size(), prot, 630 + __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS); 632 631 633 632 /* Map both the text and data into the kernel page table */ 634 - __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); 633 + for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) 634 + __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, 635 + pa_start + i * PAGE_SIZE, prot); 636 + 635 637 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { 636 638 extern char __entry_tramp_data_start[]; 637 639
+1
arch/arm64/tools/cpucaps
··· 44 44 SPECTRE_V2 45 45 SPECTRE_V3A 46 46 SPECTRE_V4 47 + SPECTRE_BHB 47 48 SSBS 48 49 SVE 49 50 UNMAP_KERNEL_AT_EL0
+5
include/linux/arm-smccc.h
··· 92 92 ARM_SMCCC_SMC_32, \ 93 93 0, 0x7fff) 94 94 95 + #define ARM_SMCCC_ARCH_WORKAROUND_3 \ 96 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ 97 + ARM_SMCCC_SMC_32, \ 98 + 0, 0x3fff) 99 + 95 100 #define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ 96 101 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ 97 102 ARM_SMCCC_SMC_32, \