Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes/cleanups from Catalin Marinas:

- Avoid taking a mutex in the secondary CPU bring-up path when
interrupts are disabled

- Ignore perf exclude_hv when the kernel is running in Hyp mode

- Remove redundant instruction in cmpxchg

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64/cpufeature: don't use mutex in bringup path
arm64: perf: Ignore exclude_hv when kernel is running in HYP
arm64: Remove redundant mov from LL/SC cmpxchg

+53 -14
+0 -1
arch/arm64/include/asm/atomic_ll_sc.h
···
264 264    " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
265 265    " cbnz %w[tmp], 1b\n" \
266 266    " " #mb "\n" \
267   -    " mov %" #w "[oldval], %" #w "[old]\n" \
268 267    "2:" \
269 268    : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
270 269    [v] "+Q" (*(unsigned long *)ptr) \
+10 -2
arch/arm64/include/asm/cpufeature.h
···
115 115
116 116    extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
117 117    extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
118   +    extern struct static_key_false arm64_const_caps_ready;
118 119
119 120    bool this_cpu_has_cap(unsigned int cap);
120 121
···
125 124    }
126 125
127 126    /* System capability check for constant caps */
128   -    static inline bool cpus_have_const_cap(int num)
127   +    static inline bool __cpus_have_const_cap(int num)
129 128    {
130 129        if (num >= ARM64_NCAPS)
131 130            return false;
···
139 138        return test_bit(num, cpu_hwcaps);
140 139    }
141 140
141   +    static inline bool cpus_have_const_cap(int num)
142   +    {
143   +        if (static_branch_likely(&arm64_const_caps_ready))
144   +            return __cpus_have_const_cap(num);
145   +        else
146   +            return cpus_have_cap(num);
147   +    }
148   +
142 149    static inline void cpus_set_cap(unsigned int num)
143 150    {
144 151        if (num >= ARM64_NCAPS) {
···
154 145            num, ARM64_NCAPS);
155 146        } else {
156 147            __set_bit(num, cpu_hwcaps);
157   -            static_branch_enable(&cpu_hwcap_keys[num]);
158 148        }
159 149
+6 -2
arch/arm64/include/asm/kvm_host.h
···
 24  24
 25  25    #include <linux/types.h>
 26  26    #include <linux/kvm_types.h>
 27   +    #include <asm/cpufeature.h>
 27  28    #include <asm/kvm.h>
 28  29    #include <asm/kvm_asm.h>
 29  30    #include <asm/kvm_mmio.h>
···
356 355                               unsigned long vector_ptr)
357 356    {
358 357        /*
359   -         * Call initialization code, and switch to the full blown
360   -         * HYP code.
358   +         * Call initialization code, and switch to the full blown HYP code.
359   +         * If the cpucaps haven't been finalized yet, something has gone very
360   +         * wrong, and hyp will crash and burn when it uses any
361   +         * cpus_have_const_cap() wrapper.
361 362         */
363   +        BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
362 364        __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
363 365    }
364 366
+21 -2
arch/arm64/kernel/cpufeature.c
···
 985  985    */
 986  986    void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 987  987    {
 988    -        for (; caps->matches; caps++)
 989    -            if (caps->enable && cpus_have_cap(caps->capability))
 988    +        for (; caps->matches; caps++) {
 989    +            unsigned int num = caps->capability;
 990    +
 991    +            if (!cpus_have_cap(num))
 992    +                continue;
 993    +
 994    +            /* Ensure cpus_have_const_cap(num) works */
 995    +            static_branch_enable(&cpu_hwcap_keys[num]);
 996    +
 997    +            if (caps->enable) {
 990  998                /*
 991  999                 * Use stop_machine() as it schedules the work allowing
 992 1000                 * us to modify PSTATE, instead of on_each_cpu() which
···
1002  994                 * we return.
1003  995                 */
1004  996                stop_machine(caps->enable, NULL, cpu_online_mask);
 997    +            }
 998    +        }
1005  999    }
1006 1000
1007 1001    /*
···
1106 1096        enable_cpu_capabilities(arm64_features);
1107 1097    }
1108 1098
1099    +    DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
1100    +    EXPORT_SYMBOL(arm64_const_caps_ready);
1101    +
1102    +    static void __init mark_const_caps_ready(void)
1103    +    {
1104    +        static_branch_enable(&arm64_const_caps_ready);
1105    +    }
1106    +
1109 1107    /*
1110 1108     * Check if the current CPU has a given feature capability.
1111 1109     * Should be called from non-preemptible context.
···
1149 1131        /* Set the CPU feature capabilies */
1150 1132        setup_feature_capabilities();
1151 1133        enable_errata_workarounds();
1134    +        mark_const_caps_ready();
1152 1135        setup_elf_hwcaps(arm64_elf_hwcaps);
1153 1136
1154 1137        if (system_supports_32bit_el0())
+16 -7
arch/arm64/kernel/perf_event.c
···
877 877
878 878        if (attr->exclude_idle)
879 879            return -EPERM;
880   -        if (is_kernel_in_hyp_mode() &&
881   -            attr->exclude_kernel != attr->exclude_hv)
882   -            return -EINVAL;
880   +
881   +        /*
882   +         * If we're running in hyp mode, then we *are* the hypervisor.
883   +         * Therefore we ignore exclude_hv in this configuration, since
884   +         * there's no hypervisor to sample anyway. This is consistent
885   +         * with other architectures (x86 and Power).
886   +         */
887   +        if (is_kernel_in_hyp_mode()) {
888   +            if (!attr->exclude_kernel)
889   +                config_base |= ARMV8_PMU_INCLUDE_EL2;
890   +        } else {
891   +            if (attr->exclude_kernel)
892   +                config_base |= ARMV8_PMU_EXCLUDE_EL1;
893   +            if (!attr->exclude_hv)
894   +                config_base |= ARMV8_PMU_INCLUDE_EL2;
895   +        }
883 896        if (attr->exclude_user)
884 897            config_base |= ARMV8_PMU_EXCLUDE_EL0;
885   -        if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
886   -            config_base |= ARMV8_PMU_EXCLUDE_EL1;
887   -        if (!attr->exclude_hv)
888   -            config_base |= ARMV8_PMU_INCLUDE_EL2;
889 898
890 899        /*
891 900         * Install the filter into config_base as this is used to