// SPDX-License-Identifier: GPL-2.0
/*
 * Hibernation support for x86
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

/* Physical address of the temporary page tables used by the restore asm. */
unsigned long temp_pgt __visible;

/* Address of the safe page the low-level restore code is copied to. */
unsigned long relocated_restore_code __visible;

/**
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 * @pfn: the page frame number to check
 */
int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn;
        unsigned long nosave_end_pfn;

        nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
        nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
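
        /*
         * Note: the begin PFN rounds down and the end PFN rounds up
         * (PAGE_ALIGN), so a page that only partially overlaps the
         * nosave section is still treated as nosave.
         */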
        return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
}

#define MD5_DIGEST_SIZE 16

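/*
 * This record is written into the hibernation image header by
 * arch_hibernation_header_save() and read back by
 * arch_hibernation_header_restore(); 'magic' and 'e820_digest' let the
 * restore kernel sanity-check that the image matches this kernel build
 * and the machine's firmware memory map.
 */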
struct restore_data_record {
        unsigned long jump_address;
        unsigned long jump_address_phys;
        unsigned long cr3;
        unsigned long magic;
        u8 e820_digest[MD5_DIGEST_SIZE];
};

#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
 * get_e820_md5 - calculate the md5 digest of a given e820 table
 *
 * @table: the e820 table to be hashed
 * @buf: the buffer to store the md5 digest in
 */
static int get_e820_md5(struct e820_table *table, void *buf)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int size;
        int ret = 0;

        tfm = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(tfm))
                return -ENOMEM;

        desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
                       GFP_KERNEL);
        if (!desc) {
                ret = -ENOMEM;
                goto free_tfm;
        }

        desc->tfm = tfm;

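        /*
         * Hash only the populated part of the table: the fields up to
         * 'entries' plus the nr_entries entries actually in use, not the
         * whole (fixed-size) entries array.
         */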
        size = offsetof(struct e820_table, entries) +
                sizeof(struct e820_entry) * table->nr_entries;

        if (crypto_shash_digest(desc, (u8 *)table, size, buf))
                ret = -EINVAL;

        kzfree(desc);

free_tfm:
        crypto_free_shash(tfm);
        return ret;
}

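/*
 * Both the save and the compare path hash e820_table_firmware, i.e. the
 * memory map as handed over by the firmware, rather than the kernel's
 * (possibly modified) working copy, so the two kernels hash the same data.
 */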
static int hibernation_e820_save(void *buf)
{
        return get_e820_md5(e820_table_firmware, buf);
}

static bool hibernation_e820_mismatch(void *buf)
{
        int ret;
        u8 result[MD5_DIGEST_SIZE];

        memset(result, 0, MD5_DIGEST_SIZE);
        /* If there is no digest in the suspend kernel, let it go. */
        if (!memcmp(result, buf, MD5_DIGEST_SIZE))
                return false;

        ret = get_e820_md5(e820_table_firmware, result);
        /* Treat a digest failure as a mismatch, erring on the safe side. */
        if (ret)
                return true;

        return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
}
#else
static int hibernation_e820_save(void *buf)
{
        return 0;
}

static bool hibernation_e820_mismatch(void *buf)
{
        /* If md5 is not built in for the restore kernel, let it go. */
        return false;
}
#endif

#ifdef CONFIG_X86_64
#define RESTORE_MAGIC 0x23456789ABCDEF01UL
#else
#define RESTORE_MAGIC 0x12345678UL
#endif
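
/*
 * RESTORE_MAGIC is word-size dependent, so the magic check in
 * arch_hibernation_header_restore() rejects images written with a
 * different word size or an unrecognized header format.
 */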

/**
 * arch_hibernation_header_save - populate the architecture specific part
 *	of a hibernation image header
 * @addr: address to save the data at
 * @max_size: maximum size of the data that can be saved at @addr
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
        struct restore_data_record *rdr = addr;

        if (max_size < sizeof(struct restore_data_record))
                return -EOVERFLOW;
        rdr->magic = RESTORE_MAGIC;
        rdr->jump_address = (unsigned long)restore_registers;
        rdr->jump_address_phys = __pa_symbol(restore_registers);

        /*
         * The restore code fixes up CR3 and CR4 in the following sequence:
         *
         * [in hibernation asm]
         * 1. CR3 <= temporary page tables
         * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
         * 3. CR3 <= rdr->cr3
         * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
         * [in restore_processor_state()]
         * 5. CR4 <= saved CR4
         * 6. CR3 <= saved CR3
         *
         * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
         * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
         * rdr->cr3 needs to point to valid page tables but must not
         * have any of the PCID bits set.
         */
        rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;

        return hibernation_e820_save(rdr->e820_digest);
}

/**
 * arch_hibernation_header_restore - read the architecture specific data
 *	from the hibernation image header
 * @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
        struct restore_data_record *rdr = addr;

        if (rdr->magic != RESTORE_MAGIC) {
                pr_crit("Unrecognized hibernate image header format!\n");
                return -EINVAL;
        }

        restore_jump_address = rdr->jump_address;
        jump_address_phys = rdr->jump_address_phys;
        restore_cr3 = rdr->cr3;

        if (hibernation_e820_mismatch(rdr->e820_digest)) {
                pr_crit("Hibernate inconsistent memory map detected!\n");
                return -ENODEV;
        }

        return 0;
}

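/*
 * The restore stub (core_restore_code) must keep running while the rest
 * of the kernel text is being overwritten by image pages, so copy it to
 * a 'safe' page that is guaranteed not to be touched during the copy.
 */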
int relocate_restore_code(void)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        relocated_restore_code = get_safe_page(GFP_ATOMIC);
        if (!relocated_restore_code)
                return -ENOMEM;

        memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

        /*
         * Make the page containing the relocated code executable: walk the
         * current page tables and clear _PAGE_NX at whichever level actually
         * maps that page, stopping at the first huge mapping found.
         */
        pgd = (pgd_t *)__va(read_cr3_pa()) +
                pgd_index(relocated_restore_code);
        p4d = p4d_offset(pgd, relocated_restore_code);
        if (p4d_large(*p4d)) {
                set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
                goto out;
        }
        pud = pud_offset(p4d, relocated_restore_code);
        if (pud_large(*pud)) {
                set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
                goto out;
        }
        pmd = pmd_offset(pud, relocated_restore_code);
        if (pmd_large(*pmd)) {
                set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
                goto out;
        }
        pte = pte_offset_kernel(pmd, relocated_restore_code);
        set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
        __flush_tlb_all();
        return 0;
}

int arch_resume_nosmt(void)
{
        int ret = 0;
        /*
         * We reached this while coming out of hibernation. This means
         * that SMT siblings are sleeping in hlt, as mwait is not safe
         * against control transition during resume (see comment in
         * hibernate_resume_nonboot_cpu_disable()).
         *
         * If the resumed kernel has SMT disabled, we have to take all the
         * SMT siblings out of hlt, and offline them again so that they
         * end up in mwait proper.
         *
         * Called with hotplug disabled.
         */
        cpu_hotplug_enable();
        if (cpu_smt_control == CPU_SMT_DISABLED ||
            cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
                enum cpuhp_smt_control old = cpu_smt_control;
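
                /*
                 * Kick the siblings out of hlt by re-enabling SMT, then
                 * restore the previous SMT setting; the normal offline
                 * path parks them in mwait.
                 */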
                ret = cpuhp_smt_enable();
                if (ret)
                        goto out;
                ret = cpuhp_smt_disable(old);
                if (ret)
                        goto out;
        }
out:
        cpu_hotplug_disable();
        return ret;
}