// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pgtable.h>

#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

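/*
 * Helpers for the opt-in MSR save/restore list (saved_msrs): the save
 * pass reads each listed MSR with rdmsrl_safe(), which returns 0 on
 * success, so ->valid ends up set only for MSRs that actually exist on
 * this CPU; the restore pass then writes back just the valid entries.
 */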
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
		msr++;
	}
}

static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the register contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operation of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32_LAZY_GS
	savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

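/*
 * Reloading TR is subtle: loading TR marks the referenced TSS descriptor
 * "busy" (type 0xB) in the GDT, and a later LTR on a busy descriptor
 * raises #GP.  That is why fix_processor_context() below resets the
 * descriptor type to "available" (0x9) on 64-bit before load_TR_desc().
 */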
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init(); /* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc();	/* This does ltr */
	load_mm_ldt(current->active_mm); /* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

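/*
 * The restore path below is ordering-sensitive: control registers come
 * first (so paging and EFER are sane), then the IDT, then known-good
 * SS/DS/ES values and the percpu base (so exception handlers and percpu
 * accesses work), and only then the descriptor-table fixups and the
 * saved usermode segments.
 */
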
/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt: structure to load the register contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	struct cpuinfo_x86 *c;

	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access.  Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->ds);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases.  Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);

	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

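/*
 * As with save_processor_state(), the export above is 32-bit only; the
 * only modular user of these entry points is believed to be the 32-bit
 * APM driver (arch/x86/kernel/apm_32.c).
 */
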
#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will be actually written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.
	 * Those will be put to proper (not interfering with hibernation
	 * resume) sleep afterwards, and the resumed kernel will decide itself
	 * what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
	smp_ops.play_dead = resume_play_dead;
	ret = freeze_secondary_cpus(0);
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

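/*
 * Note that smp_ops.play_dead is only swapped for the duration of
 * freeze_secondary_cpus() above: the parked CPUs must sit in a plain HLT
 * loop (no MONITOR/MWAIT) while the image is restored, but the normal
 * play_dead handler is put back so that later CPU offlining behaves as
 * usual.
 */
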
/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already. So it's unnecessary to handle the race condition
 * between cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

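/*
 * Suspend and hibernation on x86 resume the system on the boot CPU, so a
 * transition started with CPU0 offline could not be resumed; the PM
 * notifier below uses bsp_check() to veto such transitions up front.
 */
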
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0 because
		 * 1. it's required for resume and
		 * 2. the CPU was online before hibernation
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares the snapshot device during boot time. So we just
		 * call _debug_hotplug_cpu() to restore CPU0's state prior to
		 * preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user may
		 * see a different CPU0 state before and after accessing
		 * the snapshot device. But hopefully this is not a case a user
		 * debugging CPU0 hotplug will hit. Even if users hit this case,
		 * they can easily bring CPU0 back online.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case. Otherwise we would need to remember CPU0's state, restore
		 * to that state, resolve racy conditions, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback as lower priority than
	 * cpu_hotplug_pm_callback, so cpu_hotplug_pm_callback will be called
	 * earlier to disable cpu hotplug before the bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

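/*
 * Append @num MSR ids from @msr_id to saved_context.saved_msrs.  The
 * array grows by reallocate-and-copy, since the quirk callbacks below
 * may invoke this more than once (once per matched DMI/CPU entry).
 */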
static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);

		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		msr_array[i].info.msr_no = msr_id[j];
		msr_array[i].valid = false;
		msr_array[i].info.reg.q = 0;
	}
	saved_msrs->num = total_num;
	saved_msrs->array = msr_array;

	return 0;
}

/*
 * The following sections are a quirk framework for problematic BIOSen:
 * Sometimes MSRs are modified by the BIOSen after suspend to RAM,
 * which might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}

static const struct dmi_system_id msr_save_dmi_table[] = {
	{
		.callback = msr_initialize_bdw,
		.ident = "BROADWELL BDX_EP",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

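/*
 * Every DMI_MATCH() entry inside a single .matches block must match for
 * the callback to run, so the quirk above is confined to the one BDX-EP
 * platform identified by both product name and product version.
 */
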
static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}

static const struct x86_cpu_id msr_save_cpu_table[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
	{}
};

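/*
 * X86_MATCH_VENDOR_FAM() stores the callback in ->driver_data;
 * pm_cpu_check() retrieves it and calls it when the running CPU matches
 * the vendor/family pair.
 */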
typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}

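/*
 * Note: pm_cpu_check() currently ignores its argument and always matches
 * against msr_save_cpu_table.
 */
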
static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);

	return 0;
}

device_initcall(pm_check_save_msr);