// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;
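
/*
 * saved_context.saved_msrs is populated by msr_build_context() further
 * down; msr_save_context() snapshots each registered MSR that probed as
 * present, and msr_restore_context() writes the values back on resume.
 */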
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			rdmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the registers contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function. In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32_LAZY_GS
	savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);
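
	/*
	 * Note the asymmetric naming: while in the kernel, MSR_GS_BASE holds
	 * the kernel's GS base and MSR_KERNEL_GS_BASE holds the inactive
	 * usermode GS base that SWAPGS switches to, hence the field names.
	 */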
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}
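
/*
 * In addition to the APM use noted below, this pair of entry points is
 * used by the hibernation core (create_image(), resume_target_kernel())
 * and by the ACPI S3 low-level suspend/wakeup code.
 */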
/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif
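
/*
 * __save_processor_state() entered kernel_fpu_begin(); do_fpu_end() is the
 * matching kernel_fpu_end(), run once the registers have been restored.
 */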
static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc();				/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt: structure to load the registers contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
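/*
 * notrace: presumably tracing cannot run safely until the GS base and the
 * percpu state restored below are back in place.
 */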
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access. Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases. Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will be actually written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.
	 * Those will be put to proper (not interfering with hibernation
	 * resume) sleep afterwards, and the resumed kernel will decide itself
	 * what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
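
	/*
	 * Swap in the hlt-based resume_play_dead() while taking the non-boot
	 * CPUs down, then restore the original play_dead() handler.
	 */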
	smp_ops.play_dead = resume_play_dead;
	ret = disable_nonboot_cpus();
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle the race between
 * the cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0 because
		 * 1. it's required for resume and
		 * 2. the CPU was online before hibernation
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares the snapshot device during boot time. So we just
		 * call _debug_hotplug_cpu() to restore CPU0's state prior to
		 * preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user may
		 * see different CPU0 states before and after accessing
		 * the snapshot device. But hopefully this is not a case a user
		 * debugging CPU0 hotplug will hit. Even if users hit this case,
		 * they can easily online CPU0 back.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case. Otherwise we would need to remember CPU0's state, restore
		 * to that state, resolve race conditions, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback as lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
	 * called earlier to disable cpu hotplug before the bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);

		kfree(saved_msrs->array);
	}
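
	/*
	 * Probe each newly requested MSR once with rdmsrl_safe(): MSRs the
	 * CPU does not implement are marked invalid and will be skipped by
	 * msr_save_context() and msr_restore_context().
	 */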
	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		u64 dummy;

		msr_array[i].info.msr_no	= msr_id[j];
		msr_array[i].valid		= !rdmsrl_safe(msr_id[j], &dummy);
		msr_array[i].info.reg.q		= 0;
	}
	saved_msrs->num   = total_num;
	saved_msrs->array = msr_array;

	return 0;
}

/*
 * The following sections are a quirk framework for problematic BIOSen:
 * sometimes MSRs are modified by the BIOSen after suspend to RAM, which
 * might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
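
/*
 * Illustration only (not part of the kernel): a quirk for another affected
 * platform would follow the same pattern as msr_initialize_bdw() above --
 * list the MSRs the firmware clobbers and register them, e.g.:
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
 *
 *		pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
 *			d->ident);
 *		return msr_build_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 *
 * together with a matching .callback/.ident/.matches entry in
 * msr_save_dmi_table below. "foo" and its MSR list are hypothetical.
 */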
static const struct dmi_system_id msr_save_dmi_table[] = {
	{
	 .callback = msr_initialize_bdw,
	 .ident = "BROADWELL BDX_EP",
	 .matches = {
		DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
		DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};
static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}

static const struct x86_cpu_id msr_save_cpu_table[] = {
	{
		.vendor = X86_VENDOR_AMD,
		.family = 0x15,
		.model = X86_MODEL_ANY,
		.feature = X86_FEATURE_ANY,
		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
	},
	{
		.vendor = X86_VENDOR_AMD,
		.family = 0x16,
		.model = X86_MODEL_ANY,
		.feature = X86_FEATURE_ANY,
		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
	},
	{}
};

typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}
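
/*
 * Speculation-control MSRs may be cleared by firmware across S3, silently
 * dropping mitigations. Request save/restore for them unconditionally;
 * msr_build_context() flags the ones this CPU lacks as invalid, so they
 * are simply skipped on save and restore.
 */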
static void pm_save_spec_msr(void)
{
	u32 spec_msr_id[] = {
		MSR_IA32_SPEC_CTRL,
		MSR_IA32_TSX_CTRL,
		MSR_TSX_FORCE_ABORT,
		MSR_IA32_MCU_OPT_CTRL,
		MSR_AMD64_LS_CFG,
	};

	msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
}

static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);
	pm_save_spec_msr();

	return 0;
}

device_initcall(pm_check_save_msr);