// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <linux/dmi.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

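/*
 * Helpers for the MSR save/restore quirk machinery.  The list of MSRs to
 * preserve is filled in by msr_init_context(), driven by the DMI quirk
 * table at the bottom of this file.
 */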
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
		msr++;
	}
}

static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

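/*
 * Note that msr_restore_context() only writes back entries whose save
 * actually succeeded (msr->valid), so an MSR that could not be read on
 * this CPU is silently skipped rather than written with a stale value.
 */
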
/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the registers contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function. In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32_LAZY_GS
	savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
#ifdef CONFIG_X86_64
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

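/*
 * Reload descriptor-table state (TSS, GDT mapping, LDT) and the syscall
 * entry MSRs; the low-level wakeup code restores a usable GDT but leaves
 * the rest of this state to us.
 */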
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();		/* This sets MSR_IA32_SYSENTER_CS:ESP:EIP */
#endif
	load_TR_desc();				/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *			       by __save_processor_state()
 * @ctxt: structure to load the registers contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access. Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases. Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

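/*
 * A minimal sketch of how the pair above is meant to be used by a platform
 * sleep path (illustrative only; the real call sites live in the ACPI/APM
 * suspend code, and the enter_sleep() name here is made up):
 *
 *	save_processor_state();
 *	enter_sleep();			// firmware puts the CPU to sleep
 *	restore_processor_state();	// runs first thing after wakeup
 */
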
#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will be actually written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.
	 * Those will be put to proper (not interfering with hibernation
	 * resume) sleep afterwards, and the resumed kernel will decide itself
	 * what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
	smp_ops.play_dead = resume_play_dead;
	ret = disable_nonboot_cpus();
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

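/*
 * hibernate_resume_nonboot_cpu_disable() overrides the __weak default
 * provided by the hibernation core, which simply calls
 * disable_nonboot_cpus() without the play_dead swap above.
 */
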
/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle the race condition
 * between cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0
		 * because
		 * 1. it's required for resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation
		 * software prepares the snapshot device during boot time. So
		 * we just call _debug_hotplug_cpu() to restore CPU0's state
		 * prior to the preparation of the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug
		 * debug mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the
		 * snapshot device after boot time, this will offline CPU0 and
		 * the user may see a different CPU0 state before and after
		 * accessing the snapshot device. But hopefully this is not a
		 * case a user debugging CPU0 hotplug will hit. Even if users
		 * hit this case, they can easily bring CPU0 back online.
		 *
		 * To simplify this debug code, we only consider the normal
		 * boot case. Otherwise we would need to remember CPU0's
		 * state, restore to that state, and resolve the race
		 * conditions etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is called
	 * earlier and disables cpu hotplug before the BSP online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

static int msr_init_context(const u32 *msr_id, const int total_num)
{
	int i = 0;
	struct saved_msr *msr_array;

	if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
		pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
		return -EINVAL;
	}

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Cannot allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	for (i = 0; i < total_num; i++) {
		msr_array[i].info.msr_no	= msr_id[i];
		msr_array[i].valid		= false;
		msr_array[i].info.reg.q		= 0;
	}
	saved_context.saved_msrs.num	= total_num;
	saved_context.saved_msrs.array	= msr_array;

	return 0;
}

/*
 * The following section is a quirk framework for problematic BIOSes:
 * sometimes MSRs are modified by the BIOS after suspend to RAM, which
 * might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSes/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
		d->ident);
	return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}

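/*
 * A hypothetical template for a further quirk, kept under #if 0 so that it
 * is never compiled: the function name and MSR list below are placeholders
 * for illustration, not a real platform entry.
 */
#if 0
static int msr_initialize_example(const struct dmi_system_id *d)
{
	/* List the MSRs the firmware on this platform is known to clobber. */
	u32 example_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
		d->ident);
	return msr_init_context(example_msr_id, ARRAY_SIZE(example_msr_id));
}
#endif
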
static const struct dmi_system_id msr_save_dmi_table[] = {
	{
	 .callback = msr_initialize_bdw,
	 .ident = "BROADWELL BDX_EP",
	 .matches = {
		DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
		DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);

	return 0;
}

device_initcall(pm_check_save_msr);