// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pgtable.h>

#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
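/*
 * Everything that __save_processor_state() stows lives here. On 32-bit
 * the general-purpose registers are kept in the separate __visible
 * variables above, because the low-level resume assembly refers to
 * them by name.
 */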
struct saved_context saved_context;

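/*
 * Snapshot every MSR that was registered via msr_build_context().
 * rdmsrl_safe() is used so that an MSR which faults on this CPU is
 * simply marked invalid rather than taking the machine down.
 */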
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
		msr++;
	}
}

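/*
 * Write back only the MSRs that were read successfully at save time;
 * entries flagged invalid are skipped.
 */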
static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 * __save_processor_state - save CPU registers before creating a
 * hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the registers contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function. In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
	savesegment(gs, ctxt->gs);
#ifdef CONFIG_X86_64
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/*
 * Needed by apm.c; the APM BIOS driver is 32-bit only, hence the
 * conditional export below.
 */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9;	/* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();	/* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc();	/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 * by __save_processor_state()
 * @ctxt: structure to load the registers contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	struct cpuinfo_x86 *c;

	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else	/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access. Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases. Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#else
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);

	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
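/*
 * Park a "dead" CPU in HLT instead of MONITOR/MWAIT so that writes to
 * image memory during restoration cannot wake it; see the comment in
 * hibernate_resume_nonboot_cpu_disable() below.
 */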
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will actually be written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.
	 * Those will be put to proper (not interfering with hibernation
	 * resume) sleep afterwards, and the resumed kernel will decide itself
	 * what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
	smp_ops.play_dead = resume_play_dead;
	ret = freeze_secondary_cpus(0);
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called during hibernation or suspend, CPU hotplug
 * is already disabled, so there is no race between the cpumask query
 * and CPU hotplug to worry about.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

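/*
 * PM notifier: refuse to enter suspend or hibernation while the boot
 * CPU is offline, since the low-level wakeup code is expected to run
 * on CPU0.
 */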
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, CPU0 is onlined
		 * because:
		 * 1. it is required for resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares the snapshot device during boot time. So we just
		 * call _debug_hotplug_cpu() to restore CPU0's state prior to
		 * preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user may
		 * see different CPU0 states before and after accessing
		 * the snapshot device. But hopefully that is not the case when
		 * a user is debugging CPU0 hotplug. Even if users hit this case,
		 * they can easily bring CPU0 back online.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case. Otherwise we would need to remember CPU0's state, restore
		 * to that state, and resolve races etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback runs
	 * first and disables CPU hotplug before the BSP online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

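/*
 * Append @num MSR ids from @msr_id to the global list that
 * msr_save_context()/msr_restore_context() walk. The array is
 * reallocated with the old entries copied over, so several quirk
 * callbacks can stack their requests.
 */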
static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);

		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		msr_array[i].info.msr_no = msr_id[j];
		msr_array[i].valid = false;
		msr_array[i].info.reg.q = 0;
	}
	saved_msrs->num = total_num;
	saved_msrs->array = msr_array;

	return 0;
}

/*
 * The following sections are a quirk framework for problematic BIOSes:
 * sometimes MSRs are modified by the BIOS after a suspend to RAM, which
 * can cause unexpected behavior after wakeup. Thus we save and restore
 * the specified MSRs across suspend/resume to work around it.
 *
 * For any further problematic BIOSes/platforms, please add your own
 * function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
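
/*
 * A new quirk would follow the same pattern. As a sketch (the function
 * name and the MSR chosen here are hypothetical, for illustration only):
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_ENERGY_PERF_BIAS };
 *
 *		return msr_build_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 *
 * plus a matching entry in msr_save_dmi_table below.
 */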

static const struct dmi_system_id msr_save_dmi_table[] = {
	{
		.callback = msr_initialize_bdw,
		.ident = "BROADWELL BDX_EP",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}

static const struct x86_cpu_id msr_save_cpu_table[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
	{}
};

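/*
 * x86_match_cpu() returns the matching table entry; its driver_data
 * slot carries the setup callback registered in msr_save_cpu_table.
 * Note that pm_cpu_check() matches against the global table rather
 * than its @c argument.
 */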
typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}

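/*
 * Boot-time registration: run both quirk tables, the DMI matches for
 * known-bad BIOSes and the CPU-model matches for the AMD families
 * listed above.
 */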
static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);

	return 0;
}

device_initcall(pm_check_save_msr);