Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/mtrr: Add a stop_machine() handler calling only cache_cpu_init()

Instead of having a stop_machine() handler for either a specific
MTRR register or all state at once, add a handler just for calling
cache_cpu_init() if appropriate.

Add functions for calling stop_machine() with this handler as well.

Add a generic replacement for mtrr_bp_restore() and a wrapper for
mtrr_bp_init().

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-13-jgross@suse.com
Signed-off-by: Borislav Petkov <bp@suse.de>

Authored by Juergen Gross and committed by Borislav Petkov
0b9a6a8b 955d0e08

+74 -99
+4 -1
arch/x86/include/asm/cacheinfo.h
··· 12 12 13 13 void cache_disable(void); 14 14 void cache_enable(void); 15 - void cache_cpu_init(void); 16 15 void set_cache_aps_delayed_init(bool val); 17 16 bool get_cache_aps_delayed_init(void); 17 + void cache_bp_init(void); 18 + void cache_bp_restore(void); 19 + void cache_ap_init(void); 20 + void cache_aps_init(void); 18 21 19 22 #endif /* _ASM_X86_CACHEINFO_H */
+2 -6
arch/x86/include/asm/mtrr.h
··· 25 25 26 26 #include <uapi/asm/mtrr.h> 27 27 28 - void mtrr_bp_init(void); 29 - 30 28 /* 31 29 * The following functions are for use by other drivers that cannot use 32 30 * arch_phys_wc_add and arch_phys_wc_del. 33 31 */ 34 32 # ifdef CONFIG_MTRR 33 + void mtrr_bp_init(void); 35 34 extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform); 36 35 extern void mtrr_save_fixed_ranges(void *); 37 36 extern void mtrr_save_state(void); ··· 41 42 extern int mtrr_del(int reg, unsigned long base, unsigned long size); 42 43 extern int mtrr_del_page(int reg, unsigned long base, unsigned long size); 43 44 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); 44 - extern void mtrr_ap_init(void); 45 - extern void mtrr_aps_init(void); 46 45 extern void mtrr_bp_restore(void); 47 46 extern int mtrr_trim_uncached_memory(unsigned long end_pfn); 48 47 extern int amd_special_default_mtrr(void); ··· 82 85 static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) 83 86 { 84 87 } 85 - #define mtrr_ap_init() do {} while (0) 86 - #define mtrr_aps_init() do {} while (0) 88 + #define mtrr_bp_init() do {} while (0) 87 89 #define mtrr_bp_restore() do {} while (0) 88 90 #define mtrr_disable() do {} while (0) 89 91 #define mtrr_enable() do {} while (0)
+58 -1
arch/x86/kernel/cpu/cacheinfo.c
··· 15 15 #include <linux/capability.h> 16 16 #include <linux/sysfs.h> 17 17 #include <linux/pci.h> 18 + #include <linux/stop_machine.h> 18 19 19 20 #include <asm/cpufeature.h> 20 21 #include <asm/cacheinfo.h> ··· 1122 1121 raw_spin_unlock(&cache_disable_lock); 1123 1122 } 1124 1123 1125 - void cache_cpu_init(void) 1124 + static void cache_cpu_init(void) 1126 1125 { 1127 1126 unsigned long flags; 1128 1127 ··· 1149 1148 bool get_cache_aps_delayed_init(void) 1150 1149 { 1151 1150 return cache_aps_delayed_init; 1151 + } 1152 + 1153 + static int cache_rendezvous_handler(void *unused) 1154 + { 1155 + if (get_cache_aps_delayed_init() || !cpu_online(smp_processor_id())) 1156 + cache_cpu_init(); 1157 + 1158 + return 0; 1159 + } 1160 + 1161 + void __init cache_bp_init(void) 1162 + { 1163 + mtrr_bp_init(); 1164 + 1165 + if (memory_caching_control) 1166 + cache_cpu_init(); 1167 + } 1168 + 1169 + void cache_bp_restore(void) 1170 + { 1171 + if (memory_caching_control) 1172 + cache_cpu_init(); 1173 + } 1174 + 1175 + void cache_ap_init(void) 1176 + { 1177 + if (!memory_caching_control || get_cache_aps_delayed_init()) 1178 + return; 1179 + 1180 + /* 1181 + * Ideally we should hold mtrr_mutex here to avoid MTRR entries 1182 + * changed, but this routine will be called in CPU boot time, 1183 + * holding the lock breaks it. 1184 + * 1185 + * This routine is called in two cases: 1186 + * 1187 + * 1. very early time of software resume, when there absolutely 1188 + * isn't MTRR entry changes; 1189 + * 1190 + * 2. CPU hotadd time. 
We let mtrr_add/del_page hold cpuhotplug 1191 + * lock to prevent MTRR entry changes 1192 + */ 1193 + stop_machine_from_inactive_cpu(cache_rendezvous_handler, NULL, 1194 + cpu_callout_mask); 1195 + } 1196 + 1197 + /* 1198 + * Delayed cache initialization for all AP's 1199 + */ 1200 + void cache_aps_init(void) 1201 + { 1202 + if (!memory_caching_control || !get_cache_aps_delayed_init()) 1203 + return; 1204 + 1205 + stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask); 1206 + set_cache_aps_delayed_init(false); 1152 1207 }
+2 -1
arch/x86/kernel/cpu/common.c
··· 52 52 #include <asm/cpu.h> 53 53 #include <asm/mce.h> 54 54 #include <asm/msr.h> 55 + #include <asm/cacheinfo.h> 55 56 #include <asm/memtype.h> 56 57 #include <asm/microcode.h> 57 58 #include <asm/microcode_intel.h> ··· 1949 1948 #ifdef CONFIG_X86_32 1950 1949 enable_sep_cpu(); 1951 1950 #endif 1952 - mtrr_ap_init(); 1951 + cache_ap_init(); 1953 1952 validate_apic_and_package_id(c); 1954 1953 x86_spec_ctrl_setup_ap(); 1955 1954 update_srbds_msr();
+2 -86
arch/x86/kernel/cpu/mtrr/mtrr.c
··· 73 73 74 74 const struct mtrr_ops *mtrr_if; 75 75 76 - static void set_mtrr(unsigned int reg, unsigned long base, 77 - unsigned long size, mtrr_type type); 78 - 79 76 void __init set_mtrr_ops(const struct mtrr_ops *ops) 80 77 { 81 78 if (ops->vendor && ops->vendor < X86_VENDOR_NUM) ··· 155 158 { 156 159 struct set_mtrr_data *data = info; 157 160 158 - /* 159 - * We use this same function to initialize the mtrrs during boot, 160 - * resume, runtime cpu online and on an explicit request to set a 161 - * specific MTRR. 162 - * 163 - * During boot or suspend, the state of the boot cpu's mtrrs has been 164 - * saved, and we want to replicate that across all the cpus that come 165 - * online (either at the end of boot or resume or during a runtime cpu 166 - * online). If we're doing that, @reg is set to something special and on 167 - * all the CPUs we do cache_cpu_init() (On the logical CPU that 168 - * started the boot/resume sequence, this might be a duplicate 169 - * cache_cpu_init()). 
170 - */ 171 - if (data->smp_reg != ~0U) { 172 - mtrr_if->set(data->smp_reg, data->smp_base, 173 - data->smp_size, data->smp_type); 174 - } else if (get_cache_aps_delayed_init() || 175 - !cpu_online(smp_processor_id())) { 176 - cache_cpu_init(); 177 - } 161 + mtrr_if->set(data->smp_reg, data->smp_base, 162 + data->smp_size, data->smp_type); 178 163 return 0; 179 164 } 180 165 ··· 224 245 }; 225 246 226 247 stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask); 227 - } 228 - 229 - static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base, 230 - unsigned long size, mtrr_type type) 231 - { 232 - struct set_mtrr_data data = { .smp_reg = reg, 233 - .smp_base = base, 234 - .smp_size = size, 235 - .smp_type = type 236 - }; 237 - 238 - stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data, 239 - cpu_callout_mask); 240 248 } 241 249 242 250 /** ··· 727 761 if (get_mtrr_state()) { 728 762 memory_caching_control |= CACHE_MTRR | CACHE_PAT; 729 763 changed_by_mtrr_cleanup = mtrr_cleanup(phys_addr); 730 - cache_cpu_init(); 731 764 } else { 732 765 mtrr_if = NULL; 733 766 } ··· 745 780 } 746 781 } 747 782 748 - void mtrr_ap_init(void) 749 - { 750 - if (!memory_caching_control || get_cache_aps_delayed_init()) 751 - return; 752 - 753 - /* 754 - * Ideally we should hold mtrr_mutex here to avoid mtrr entries 755 - * changed, but this routine will be called in cpu boot time, 756 - * holding the lock breaks it. 757 - * 758 - * This routine is called in two cases: 759 - * 760 - * 1. very early time of software resume, when there absolutely 761 - * isn't mtrr entry changes; 762 - * 763 - * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug 764 - * lock to prevent mtrr entry changes 765 - */ 766 - set_mtrr_from_inactive_cpu(~0U, 0, 0, 0); 767 - } 768 - 769 783 /** 770 784 * mtrr_save_state - Save current fixed-range MTRR state of the first 771 785 * cpu in cpu_online_mask. 
··· 758 814 759 815 first_cpu = cpumask_first(cpu_online_mask); 760 816 smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1); 761 - } 762 - 763 - /* 764 - * Delayed MTRR initialization for all AP's 765 - */ 766 - void mtrr_aps_init(void) 767 - { 768 - if (!memory_caching_control) 769 - return; 770 - 771 - /* 772 - * Check if someone has requested the delay of AP MTRR initialization, 773 - * by doing set_mtrr_aps_delayed_init(), prior to this point. If not, 774 - * then we are done. 775 - */ 776 - if (!get_cache_aps_delayed_init()) 777 - return; 778 - 779 - set_mtrr(~0U, 0, 0, 0); 780 - set_cache_aps_delayed_init(false); 781 - } 782 - 783 - void mtrr_bp_restore(void) 784 - { 785 - if (!memory_caching_control) 786 - return; 787 - 788 - cache_cpu_init(); 789 817 } 790 818 791 819 static int __init mtrr_init_finialize(void)
+2 -1
arch/x86/kernel/setup.c
··· 34 34 #include <asm/numa.h> 35 35 #include <asm/bios_ebda.h> 36 36 #include <asm/bugs.h> 37 + #include <asm/cacheinfo.h> 37 38 #include <asm/cpu.h> 38 39 #include <asm/efi.h> 39 40 #include <asm/gart.h> ··· 1076 1075 1077 1076 /* update e820 for memory not covered by WB MTRRs */ 1078 1077 if (IS_ENABLED(CONFIG_MTRR)) 1079 - mtrr_bp_init(); 1078 + cache_bp_init(); 1080 1079 else 1081 1080 pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel."); 1082 1081
+2 -2
arch/x86/kernel/smpboot.c
··· 1445 1445 1446 1446 void arch_thaw_secondary_cpus_end(void) 1447 1447 { 1448 - mtrr_aps_init(); 1448 + cache_aps_init(); 1449 1449 } 1450 1450 1451 1451 /* ··· 1488 1488 1489 1489 nmi_selftest(); 1490 1490 impress_friends(); 1491 - mtrr_aps_init(); 1491 + cache_aps_init(); 1492 1492 } 1493 1493 1494 1494 static int __initdata setup_possible_cpus = -1;
+2 -1
arch/x86/power/cpu.c
··· 23 23 #include <asm/fpu/api.h> 24 24 #include <asm/debugreg.h> 25 25 #include <asm/cpu.h> 26 + #include <asm/cacheinfo.h> 26 27 #include <asm/mmu_context.h> 27 28 #include <asm/cpu_device_id.h> 28 29 #include <asm/microcode.h> ··· 262 261 do_fpu_end(); 263 262 tsc_verify_tsc_adjust(true); 264 263 x86_platform.restore_sched_clock_state(); 265 - mtrr_bp_restore(); 264 + cache_bp_restore(); 266 265 perf_restore_debug_store(); 267 266 268 267 c = &cpu_data(smp_processor_id());