Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-bhb' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM spectre fixes from Russell King:
"ARM Spectre BHB mitigations.

These patches add Spectre BHB mitigations for the following Arm CPUs
to the 32-bit ARM kernels:
- Cortex A15
- Cortex A57
- Cortex A72
- Cortex A73
- Cortex A75
- Brahma B15
for CVE-2022-23960"

* tag 'for-linus-bhb' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: include unprivileged BPF status in Spectre V2 reporting
ARM: Spectre-BHB workaround
ARM: use LOADADDR() to get load address of sections
ARM: early traps initialisation
ARM: report Spectre v2 status through sysfs

+482 -56
+10
arch/arm/include/asm/assembler.h
··· 107 107 .endm 108 108 #endif 109 109 110 + #if __LINUX_ARM_ARCH__ < 7 111 + .macro dsb, args 112 + mcr p15, 0, r0, c7, c10, 4 113 + .endm 114 + 115 + .macro isb, args 116 + mcr p15, 0, r0, c7, c5, 4 117 + .endm 118 + #endif 119 + 110 120 .macro asm_trace_hardirqs_off, save=1 111 121 #if defined(CONFIG_TRACE_IRQFLAGS) 112 122 .if \save
+32
arch/arm/include/asm/spectre.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + 3 + #ifndef __ASM_SPECTRE_H 4 + #define __ASM_SPECTRE_H 5 + 6 + enum { 7 + SPECTRE_UNAFFECTED, 8 + SPECTRE_MITIGATED, 9 + SPECTRE_VULNERABLE, 10 + }; 11 + 12 + enum { 13 + __SPECTRE_V2_METHOD_BPIALL, 14 + __SPECTRE_V2_METHOD_ICIALLU, 15 + __SPECTRE_V2_METHOD_SMC, 16 + __SPECTRE_V2_METHOD_HVC, 17 + __SPECTRE_V2_METHOD_LOOP8, 18 + }; 19 + 20 + enum { 21 + SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL), 22 + SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU), 23 + SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC), 24 + SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC), 25 + SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8), 26 + }; 27 + 28 + void spectre_v2_update_state(unsigned int state, unsigned int methods); 29 + 30 + int spectre_bhb_update_vectors(unsigned int method); 31 + 32 + #endif
+26 -9
arch/arm/include/asm/vmlinux.lds.h
··· 26 26 #define ARM_MMU_DISCARD(x) x 27 27 #endif 28 28 29 + /* Set start/end symbol names to the LMA for the section */ 30 + #define ARM_LMA(sym, section) \ 31 + sym##_start = LOADADDR(section); \ 32 + sym##_end = LOADADDR(section) + SIZEOF(section) 33 + 29 34 #define PROC_INFO \ 30 35 . = ALIGN(4); \ 31 36 __proc_info_begin = .; \ ··· 115 110 * only thing that matters is their relative offsets 116 111 */ 117 112 #define ARM_VECTORS \ 118 - __vectors_start = .; \ 119 - .vectors 0xffff0000 : AT(__vectors_start) { \ 120 - *(.vectors) \ 113 + __vectors_lma = .; \ 114 + OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \ 115 + .vectors { \ 116 + *(.vectors) \ 117 + } \ 118 + .vectors.bhb.loop8 { \ 119 + *(.vectors.bhb.loop8) \ 120 + } \ 121 + .vectors.bhb.bpiall { \ 122 + *(.vectors.bhb.bpiall) \ 123 + } \ 121 124 } \ 122 - . = __vectors_start + SIZEOF(.vectors); \ 123 - __vectors_end = .; \ 125 + ARM_LMA(__vectors, .vectors); \ 126 + ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \ 127 + ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \ 128 + . = __vectors_lma + SIZEOF(.vectors) + \ 129 + SIZEOF(.vectors.bhb.loop8) + \ 130 + SIZEOF(.vectors.bhb.bpiall); \ 124 131 \ 125 - __stubs_start = .; \ 126 - .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) { \ 132 + __stubs_lma = .; \ 133 + .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \ 127 134 *(.stubs) \ 128 135 } \ 129 - . = __stubs_start + SIZEOF(.stubs); \ 130 - __stubs_end = .; \ 136 + ARM_LMA(__stubs, .stubs); \ 137 + . = __stubs_lma + SIZEOF(.stubs); \ 131 138 \ 132 139 PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors)); 133 140
+2
arch/arm/kernel/Makefile
··· 106 106 107 107 obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o 108 108 109 + obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o 110 + 109 111 extra-y := $(head-y) vmlinux.lds
+73 -6
arch/arm/kernel/entry-armv.S
··· 1002 1002 sub lr, lr, #\correction 1003 1003 .endif 1004 1004 1005 - @ 1006 - @ Save r0, lr_<exception> (parent PC) and spsr_<exception> 1007 - @ (parent CPSR) 1008 - @ 1005 + @ Save r0, lr_<exception> (parent PC) 1009 1006 stmia sp, {r0, lr} @ save r0, lr 1010 - mrs lr, spsr 1007 + 1008 + @ Save spsr_<exception> (parent CPSR) 1009 + 2: mrs lr, spsr 1011 1010 str lr, [sp, #8] @ save spsr 1012 1011 1013 1012 @ ··· 1027 1028 movs pc, lr @ branch to handler in SVC mode 1028 1029 ENDPROC(vector_\name) 1029 1030 1031 + #ifdef CONFIG_HARDEN_BRANCH_HISTORY 1032 + .subsection 1 1033 + .align 5 1034 + vector_bhb_loop8_\name: 1035 + .if \correction 1036 + sub lr, lr, #\correction 1037 + .endif 1038 + 1039 + @ Save r0, lr_<exception> (parent PC) 1040 + stmia sp, {r0, lr} 1041 + 1042 + @ bhb workaround 1043 + mov r0, #8 1044 + 1: b . + 4 1045 + subs r0, r0, #1 1046 + bne 1b 1047 + dsb 1048 + isb 1049 + b 2b 1050 + ENDPROC(vector_bhb_loop8_\name) 1051 + 1052 + vector_bhb_bpiall_\name: 1053 + .if \correction 1054 + sub lr, lr, #\correction 1055 + .endif 1056 + 1057 + @ Save r0, lr_<exception> (parent PC) 1058 + stmia sp, {r0, lr} 1059 + 1060 + @ bhb workaround 1061 + mcr p15, 0, r0, c7, c5, 6 @ BPIALL 1062 + @ isb not needed due to "movs pc, lr" in the vector stub 1063 + @ which gives a "context synchronisation". 
1064 + b 2b 1065 + ENDPROC(vector_bhb_bpiall_\name) 1066 + .previous 1067 + #endif 1068 + 1030 1069 .align 2 1031 1070 @ handler addresses follow this label 1032 1071 1: ··· 1073 1036 .section .stubs, "ax", %progbits 1074 1037 @ This must be the first word 1075 1038 .word vector_swi 1039 + #ifdef CONFIG_HARDEN_BRANCH_HISTORY 1040 + .word vector_bhb_loop8_swi 1041 + .word vector_bhb_bpiall_swi 1042 + #endif 1076 1043 1077 1044 vector_rst: 1078 1045 ARM( swi SYS_ERROR0 ) ··· 1191 1150 * FIQ "NMI" handler 1192 1151 *----------------------------------------------------------------------------- 1193 1152 * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86 1194 - * systems. 1153 + * systems. This must be the last vector stub, so lets place it in its own 1154 + * subsection. 1195 1155 */ 1156 + .subsection 2 1196 1157 vector_stub fiq, FIQ_MODE, 4 1197 1158 1198 1159 .long __fiq_usr @ 0 (USR_26 / USR_32) ··· 1226 1183 W(b) vector_addrexcptn 1227 1184 W(b) vector_irq 1228 1185 W(b) vector_fiq 1186 + 1187 + #ifdef CONFIG_HARDEN_BRANCH_HISTORY 1188 + .section .vectors.bhb.loop8, "ax", %progbits 1189 + .L__vectors_bhb_loop8_start: 1190 + W(b) vector_rst 1191 + W(b) vector_bhb_loop8_und 1192 + W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004 1193 + W(b) vector_bhb_loop8_pabt 1194 + W(b) vector_bhb_loop8_dabt 1195 + W(b) vector_addrexcptn 1196 + W(b) vector_bhb_loop8_irq 1197 + W(b) vector_bhb_loop8_fiq 1198 + 1199 + .section .vectors.bhb.bpiall, "ax", %progbits 1200 + .L__vectors_bhb_bpiall_start: 1201 + W(b) vector_rst 1202 + W(b) vector_bhb_bpiall_und 1203 + W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008 1204 + W(b) vector_bhb_bpiall_pabt 1205 + W(b) vector_bhb_bpiall_dabt 1206 + W(b) vector_addrexcptn 1207 + W(b) vector_bhb_bpiall_irq 1208 + W(b) vector_bhb_bpiall_fiq 1209 + #endif 1229 1210 1230 1211 .data 1231 1212 .align 2
+24
arch/arm/kernel/entry-common.S
··· 154 154 */ 155 155 156 156 .align 5 157 + #ifdef CONFIG_HARDEN_BRANCH_HISTORY 158 + ENTRY(vector_bhb_loop8_swi) 159 + sub sp, sp, #PT_REGS_SIZE 160 + stmia sp, {r0 - r12} 161 + mov r8, #8 162 + 1: b 2f 163 + 2: subs r8, r8, #1 164 + bne 1b 165 + dsb 166 + isb 167 + b 3f 168 + ENDPROC(vector_bhb_loop8_swi) 169 + 170 + .align 5 171 + ENTRY(vector_bhb_bpiall_swi) 172 + sub sp, sp, #PT_REGS_SIZE 173 + stmia sp, {r0 - r12} 174 + mcr p15, 0, r8, c7, c5, 6 @ BPIALL 175 + isb 176 + b 3f 177 + ENDPROC(vector_bhb_bpiall_swi) 178 + #endif 179 + .align 5 157 180 ENTRY(vector_swi) 158 181 #ifdef CONFIG_CPU_V7M 159 182 v7m_exception_entry 160 183 #else 161 184 sub sp, sp, #PT_REGS_SIZE 162 185 stmia sp, {r0 - r12} @ Calling r0 - r12 186 + 3: 163 187 ARM( add r8, sp, #S_PC ) 164 188 ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr 165 189 THUMB( mov r8, sp )
+71
arch/arm/kernel/spectre.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #include <linux/bpf.h> 3 + #include <linux/cpu.h> 4 + #include <linux/device.h> 5 + 6 + #include <asm/spectre.h> 7 + 8 + static bool _unprivileged_ebpf_enabled(void) 9 + { 10 + #ifdef CONFIG_BPF_SYSCALL 11 + return !sysctl_unprivileged_bpf_disabled; 12 + #else 13 + return false; 14 + #endif 15 + } 16 + 17 + ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, 18 + char *buf) 19 + { 20 + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 21 + } 22 + 23 + static unsigned int spectre_v2_state; 24 + static unsigned int spectre_v2_methods; 25 + 26 + void spectre_v2_update_state(unsigned int state, unsigned int method) 27 + { 28 + if (state > spectre_v2_state) 29 + spectre_v2_state = state; 30 + spectre_v2_methods |= method; 31 + } 32 + 33 + ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, 34 + char *buf) 35 + { 36 + const char *method; 37 + 38 + if (spectre_v2_state == SPECTRE_UNAFFECTED) 39 + return sprintf(buf, "%s\n", "Not affected"); 40 + 41 + if (spectre_v2_state != SPECTRE_MITIGATED) 42 + return sprintf(buf, "%s\n", "Vulnerable"); 43 + 44 + if (_unprivileged_ebpf_enabled()) 45 + return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); 46 + 47 + switch (spectre_v2_methods) { 48 + case SPECTRE_V2_METHOD_BPIALL: 49 + method = "Branch predictor hardening"; 50 + break; 51 + 52 + case SPECTRE_V2_METHOD_ICIALLU: 53 + method = "I-cache invalidation"; 54 + break; 55 + 56 + case SPECTRE_V2_METHOD_SMC: 57 + case SPECTRE_V2_METHOD_HVC: 58 + method = "Firmware call"; 59 + break; 60 + 61 + case SPECTRE_V2_METHOD_LOOP8: 62 + method = "History overwrite"; 63 + break; 64 + 65 + default: 66 + method = "Multiple mitigations"; 67 + break; 68 + } 69 + 70 + return sprintf(buf, "Mitigation: %s\n", method); 71 + }
+59 -6
arch/arm/kernel/traps.c
··· 30 30 #include <linux/atomic.h> 31 31 #include <asm/cacheflush.h> 32 32 #include <asm/exception.h> 33 + #include <asm/spectre.h> 33 34 #include <asm/unistd.h> 34 35 #include <asm/traps.h> 35 36 #include <asm/ptrace.h> ··· 790 789 } 791 790 #endif 792 791 792 + #ifndef CONFIG_CPU_V7M 793 + static void copy_from_lma(void *vma, void *lma_start, void *lma_end) 794 + { 795 + memcpy(vma, lma_start, lma_end - lma_start); 796 + } 797 + 798 + static void flush_vectors(void *vma, size_t offset, size_t size) 799 + { 800 + unsigned long start = (unsigned long)vma + offset; 801 + unsigned long end = start + size; 802 + 803 + flush_icache_range(start, end); 804 + } 805 + 806 + #ifdef CONFIG_HARDEN_BRANCH_HISTORY 807 + int spectre_bhb_update_vectors(unsigned int method) 808 + { 809 + extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[]; 810 + extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[]; 811 + void *vec_start, *vec_end; 812 + 813 + if (system_state >= SYSTEM_FREEING_INITMEM) { 814 + pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n", 815 + smp_processor_id()); 816 + return SPECTRE_VULNERABLE; 817 + } 818 + 819 + switch (method) { 820 + case SPECTRE_V2_METHOD_LOOP8: 821 + vec_start = __vectors_bhb_loop8_start; 822 + vec_end = __vectors_bhb_loop8_end; 823 + break; 824 + 825 + case SPECTRE_V2_METHOD_BPIALL: 826 + vec_start = __vectors_bhb_bpiall_start; 827 + vec_end = __vectors_bhb_bpiall_end; 828 + break; 829 + 830 + default: 831 + pr_err("CPU%u: unknown Spectre BHB state %d\n", 832 + smp_processor_id(), method); 833 + return SPECTRE_VULNERABLE; 834 + } 835 + 836 + copy_from_lma(vectors_page, vec_start, vec_end); 837 + flush_vectors(vectors_page, 0, vec_end - vec_start); 838 + 839 + return SPECTRE_MITIGATED; 840 + } 841 + #endif 842 + 793 843 void __init early_trap_init(void *vectors_base) 794 844 { 795 - #ifndef CONFIG_CPU_V7M 796 - unsigned long vectors = (unsigned long)vectors_base; 797 845 extern char __stubs_start[], 
__stubs_end[]; 798 846 extern char __vectors_start[], __vectors_end[]; 799 847 unsigned i; ··· 863 813 * into the vector page, mapped at 0xffff0000, and ensure these 864 814 * are visible to the instruction stream. 865 815 */ 866 - memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); 867 - memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); 816 + copy_from_lma(vectors_base, __vectors_start, __vectors_end); 817 + copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end); 868 818 869 819 kuser_init(vectors_base); 870 820 871 - flush_icache_range(vectors, vectors + PAGE_SIZE * 2); 821 + flush_vectors(vectors_base, 0, PAGE_SIZE * 2); 822 + } 872 823 #else /* ifndef CONFIG_CPU_V7M */ 824 + void __init early_trap_init(void *vectors_base) 825 + { 873 826 /* 874 827 * on V7-M there is no need to copy the vector table to a dedicated 875 828 * memory area. The address is configurable and so a table in the kernel 876 829 * image can be used. 877 830 */ 878 - #endif 879 831 } 832 + #endif
+11
arch/arm/mm/Kconfig
··· 830 830 831 831 config CPU_SPECTRE 832 832 bool 833 + select GENERIC_CPU_VULNERABILITIES 833 834 834 835 config HARDEN_BRANCH_PREDICTOR 835 836 bool "Harden the branch predictor against aliasing attacks" if EXPERT ··· 850 849 the system firmware. 851 850 852 851 If unsure, say Y. 852 + 853 + config HARDEN_BRANCH_HISTORY 854 + bool "Harden Spectre style attacks against branch history" if EXPERT 855 + depends on CPU_SPECTRE 856 + default y 857 + help 858 + Speculation attacks against some high-performance processors can 859 + make use of branch history to influence future speculation. When 860 + taking an exception, a sequence of branches overwrites the branch 861 + history, or branch history is invalidated. 853 862 854 863 config TLS_REG_EMUL 855 864 bool
+174 -35
arch/arm/mm/proc-v7-bugs.c
··· 6 6 #include <asm/cp15.h> 7 7 #include <asm/cputype.h> 8 8 #include <asm/proc-fns.h> 9 + #include <asm/spectre.h> 9 10 #include <asm/system_misc.h> 11 + 12 + #ifdef CONFIG_ARM_PSCI 13 + static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) 14 + { 15 + struct arm_smccc_res res; 16 + 17 + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, 18 + ARM_SMCCC_ARCH_WORKAROUND_1, &res); 19 + 20 + switch ((int)res.a0) { 21 + case SMCCC_RET_SUCCESS: 22 + return SPECTRE_MITIGATED; 23 + 24 + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: 25 + return SPECTRE_UNAFFECTED; 26 + 27 + default: 28 + return SPECTRE_VULNERABLE; 29 + } 30 + } 31 + #else 32 + static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) 33 + { 34 + return SPECTRE_VULNERABLE; 35 + } 36 + #endif 10 37 11 38 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR 12 39 DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); ··· 63 36 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); 64 37 } 65 38 66 - static void cpu_v7_spectre_init(void) 39 + static unsigned int spectre_v2_install_workaround(unsigned int method) 67 40 { 68 41 const char *spectre_v2_method = NULL; 69 42 int cpu = smp_processor_id(); 70 43 71 44 if (per_cpu(harden_branch_predictor_fn, cpu)) 72 - return; 45 + return SPECTRE_MITIGATED; 46 + 47 + switch (method) { 48 + case SPECTRE_V2_METHOD_BPIALL: 49 + per_cpu(harden_branch_predictor_fn, cpu) = 50 + harden_branch_predictor_bpiall; 51 + spectre_v2_method = "BPIALL"; 52 + break; 53 + 54 + case SPECTRE_V2_METHOD_ICIALLU: 55 + per_cpu(harden_branch_predictor_fn, cpu) = 56 + harden_branch_predictor_iciallu; 57 + spectre_v2_method = "ICIALLU"; 58 + break; 59 + 60 + case SPECTRE_V2_METHOD_HVC: 61 + per_cpu(harden_branch_predictor_fn, cpu) = 62 + call_hvc_arch_workaround_1; 63 + cpu_do_switch_mm = cpu_v7_hvc_switch_mm; 64 + spectre_v2_method = "hypervisor"; 65 + break; 66 + 67 + case SPECTRE_V2_METHOD_SMC: 68 + per_cpu(harden_branch_predictor_fn, cpu) = 69 + 
call_smc_arch_workaround_1; 70 + cpu_do_switch_mm = cpu_v7_smc_switch_mm; 71 + spectre_v2_method = "firmware"; 72 + break; 73 + } 74 + 75 + if (spectre_v2_method) 76 + pr_info("CPU%u: Spectre v2: using %s workaround\n", 77 + smp_processor_id(), spectre_v2_method); 78 + 79 + return SPECTRE_MITIGATED; 80 + } 81 + #else 82 + static unsigned int spectre_v2_install_workaround(unsigned int method) 83 + { 84 + pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n", 85 + smp_processor_id()); 86 + 87 + return SPECTRE_VULNERABLE; 88 + } 89 + #endif 90 + 91 + static void cpu_v7_spectre_v2_init(void) 92 + { 93 + unsigned int state, method = 0; 73 94 74 95 switch (read_cpuid_part()) { 75 96 case ARM_CPU_PART_CORTEX_A8: ··· 125 51 case ARM_CPU_PART_CORTEX_A17: 126 52 case ARM_CPU_PART_CORTEX_A73: 127 53 case ARM_CPU_PART_CORTEX_A75: 128 - per_cpu(harden_branch_predictor_fn, cpu) = 129 - harden_branch_predictor_bpiall; 130 - spectre_v2_method = "BPIALL"; 54 + state = SPECTRE_MITIGATED; 55 + method = SPECTRE_V2_METHOD_BPIALL; 131 56 break; 132 57 133 58 case ARM_CPU_PART_CORTEX_A15: 134 59 case ARM_CPU_PART_BRAHMA_B15: 135 - per_cpu(harden_branch_predictor_fn, cpu) = 136 - harden_branch_predictor_iciallu; 137 - spectre_v2_method = "ICIALLU"; 60 + state = SPECTRE_MITIGATED; 61 + method = SPECTRE_V2_METHOD_ICIALLU; 138 62 break; 139 63 140 - #ifdef CONFIG_ARM_PSCI 141 64 case ARM_CPU_PART_BRAHMA_B53: 142 65 /* Requires no workaround */ 66 + state = SPECTRE_UNAFFECTED; 143 67 break; 68 + 144 69 default: 145 70 /* Other ARM CPUs require no workaround */ 146 - if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) 71 + if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) { 72 + state = SPECTRE_UNAFFECTED; 147 73 break; 148 - fallthrough; 149 - /* Cortex A57/A72 require firmware workaround */ 150 - case ARM_CPU_PART_CORTEX_A57: 151 - case ARM_CPU_PART_CORTEX_A72: { 152 - struct arm_smccc_res res; 74 + } 75 + 153 76 154 - arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, 155 - ARM_SMCCC_ARCH_WORKAROUND_1, 
&res); 156 - if ((int)res.a0 != 0) 157 - return; 76 + fallthrough; 77 + 78 + /* Cortex A57/A72 require firmware workaround */ 79 + case ARM_CPU_PART_CORTEX_A57: 80 + case ARM_CPU_PART_CORTEX_A72: 81 + state = spectre_v2_get_cpu_fw_mitigation_state(); 82 + if (state != SPECTRE_MITIGATED) 83 + break; 158 84 159 85 switch (arm_smccc_1_1_get_conduit()) { 160 86 case SMCCC_CONDUIT_HVC: 161 - per_cpu(harden_branch_predictor_fn, cpu) = 162 - call_hvc_arch_workaround_1; 163 - cpu_do_switch_mm = cpu_v7_hvc_switch_mm; 164 - spectre_v2_method = "hypervisor"; 87 + method = SPECTRE_V2_METHOD_HVC; 165 88 break; 166 89 167 90 case SMCCC_CONDUIT_SMC: 168 - per_cpu(harden_branch_predictor_fn, cpu) = 169 - call_smc_arch_workaround_1; 170 - cpu_do_switch_mm = cpu_v7_smc_switch_mm; 171 - spectre_v2_method = "firmware"; 91 + method = SPECTRE_V2_METHOD_SMC; 172 92 break; 173 93 174 94 default: 95 + state = SPECTRE_VULNERABLE; 175 96 break; 176 97 } 177 98 } 178 - #endif 99 + 100 + if (state == SPECTRE_MITIGATED) 101 + state = spectre_v2_install_workaround(method); 102 + 103 + spectre_v2_update_state(state, method); 104 + } 105 + 106 + #ifdef CONFIG_HARDEN_BRANCH_HISTORY 107 + static int spectre_bhb_method; 108 + 109 + static const char *spectre_bhb_method_name(int method) 110 + { 111 + switch (method) { 112 + case SPECTRE_V2_METHOD_LOOP8: 113 + return "loop"; 114 + 115 + case SPECTRE_V2_METHOD_BPIALL: 116 + return "BPIALL"; 117 + 118 + default: 119 + return "unknown"; 120 + } 121 + } 122 + 123 + static int spectre_bhb_install_workaround(int method) 124 + { 125 + if (spectre_bhb_method != method) { 126 + if (spectre_bhb_method) { 127 + pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n", 128 + smp_processor_id()); 129 + 130 + return SPECTRE_VULNERABLE; 131 + } 132 + 133 + if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE) 134 + return SPECTRE_VULNERABLE; 135 + 136 + spectre_bhb_method = method; 179 137 } 180 138 181 - if (spectre_v2_method) 182 - 
pr_info("CPU%u: Spectre v2: using %s workaround\n", 183 - smp_processor_id(), spectre_v2_method); 139 + pr_info("CPU%u: Spectre BHB: using %s workaround\n", 140 + smp_processor_id(), spectre_bhb_method_name(method)); 141 + 142 + return SPECTRE_MITIGATED; 184 143 } 185 144 #else 186 - static void cpu_v7_spectre_init(void) 145 + static int spectre_bhb_install_workaround(int method) 187 146 { 147 + return SPECTRE_VULNERABLE; 188 148 } 189 149 #endif 150 + 151 + static void cpu_v7_spectre_bhb_init(void) 152 + { 153 + unsigned int state, method = 0; 154 + 155 + switch (read_cpuid_part()) { 156 + case ARM_CPU_PART_CORTEX_A15: 157 + case ARM_CPU_PART_BRAHMA_B15: 158 + case ARM_CPU_PART_CORTEX_A57: 159 + case ARM_CPU_PART_CORTEX_A72: 160 + state = SPECTRE_MITIGATED; 161 + method = SPECTRE_V2_METHOD_LOOP8; 162 + break; 163 + 164 + case ARM_CPU_PART_CORTEX_A73: 165 + case ARM_CPU_PART_CORTEX_A75: 166 + state = SPECTRE_MITIGATED; 167 + method = SPECTRE_V2_METHOD_BPIALL; 168 + break; 169 + 170 + default: 171 + state = SPECTRE_UNAFFECTED; 172 + break; 173 + } 174 + 175 + if (state == SPECTRE_MITIGATED) 176 + state = spectre_bhb_install_workaround(method); 177 + 178 + spectre_v2_update_state(state, method); 179 + } 190 180 191 181 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned, 192 182 u32 mask, const char *msg) ··· 280 142 void cpu_v7_ca8_ibe(void) 281 143 { 282 144 if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6))) 283 - cpu_v7_spectre_init(); 145 + cpu_v7_spectre_v2_init(); 284 146 } 285 147 286 148 void cpu_v7_ca15_ibe(void) 287 149 { 288 150 if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0))) 289 - cpu_v7_spectre_init(); 151 + cpu_v7_spectre_v2_init(); 290 152 } 291 153 292 154 void cpu_v7_bugs_init(void) 293 155 { 294 - cpu_v7_spectre_init(); 156 + cpu_v7_spectre_v2_init(); 157 + cpu_v7_spectre_bhb_init(); 295 158 }