/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

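/*
 * Physical address of the temporary top-level page table; the restore
 * assembly in hibernate_asm_64.S switches to it before copying the image
 * pages back into place.
 */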
unsigned long temp_level4_pgt __visible;

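/*
 * Address of the safe page that relocate_restore_code() copies
 * core_restore_code to.
 */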
unsigned long relocated_restore_code __visible;

static int set_up_temporary_text_mapping(pgd_t *pgd)
{
        pmd_t *pmd;
        pud_t *pud;
        p4d_t *p4d;

        /*
         * The new mapping only has to cover the page containing the image
         * kernel's entry point (jump_address_phys), because the switch over to
         * it is carried out by relocated code running from a page allocated
         * specifically for this purpose and covered by the identity mapping, so
         * the temporary kernel text mapping is only needed for the final jump.
         * Moreover, in that mapping the virtual address of the image kernel's
         * entry point must be the same as its virtual address in the image
         * kernel (restore_jump_address), so the image kernel's
         * restore_registers() code doesn't find itself in a different area of
         * the virtual address space after switching over to the original page
         * tables used by the image kernel.
         */

        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
                if (!p4d)
                        return -ENOMEM;
        }

        pud = (pud_t *)get_safe_page(GFP_ATOMIC);
        if (!pud)
                return -ENOMEM;

        pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
        if (!pmd)
                return -ENOMEM;

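        /*
         * Map the large page containing the image kernel's entry point at
         * restore_jump_address and wire the page table levels together
         * bottom-up.
         */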
        set_pmd(pmd + pmd_index(restore_jump_address),
                __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
        set_pud(pud + pud_index(restore_jump_address),
                __pud(__pa(pmd) | _KERNPG_TABLE));
        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                set_p4d(p4d + p4d_index(restore_jump_address),
                        __p4d(__pa(pud) | _KERNPG_TABLE));
                set_pgd(pgd + pgd_index(restore_jump_address),
                        __pgd(__pa(p4d) | _KERNPG_TABLE));
        } else {
                /* No p4d for 4-level paging: point the pgd to the pud page table */
                set_pgd(pgd + pgd_index(restore_jump_address),
                        __pgd(__pa(pud) | _KERNPG_TABLE));
        }

        return 0;
}

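/*
 * Page table allocator callback for kernel_ident_mapping_init(); safe pages
 * are guaranteed not to overlap the hibernation image data.
 */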
static void *alloc_pgt_page(void *context)
{
        return (void *)get_safe_page(GFP_ATOMIC);
}

static int set_up_temporary_mappings(void)
{
        struct x86_mapping_info info = {
                .alloc_pgt_page = alloc_pgt_page,
                .page_flag      = __PAGE_KERNEL_LARGE_EXEC,
                .offset         = __PAGE_OFFSET,
        };
        unsigned long mstart, mend;
        pgd_t *pgd;
        int result;
        int i;

        pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!pgd)
                return -ENOMEM;

        /* Prepare a temporary mapping for the kernel text */
        result = set_up_temporary_text_mapping(pgd);
        if (result)
                return result;

        /* Set up the direct mapping from scratch */
        for (i = 0; i < nr_pfn_mapped; i++) {
                mstart = pfn_mapped[i].start << PAGE_SHIFT;
                mend = pfn_mapped[i].end << PAGE_SHIFT;

                result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
                if (result)
                        return result;
        }

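        /* Publish the physical address of the new top-level table for the restore assembly. */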
        temp_level4_pgt = __pa(pgd);
        return 0;
}

static int relocate_restore_code(void)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        relocated_restore_code = get_safe_page(GFP_ATOMIC);
        if (!relocated_restore_code)
                return -ENOMEM;

        memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

        /* Make the page containing the relocated code executable */
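        /*
         * Walk the current page tables down to whichever level actually maps
         * the page (it may be covered by a large mapping at the p4d, pud or
         * pmd level) and clear _PAGE_NX in that entry.
         */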
        pgd = (pgd_t *)__va(read_cr3_pa()) +
                pgd_index(relocated_restore_code);
        p4d = p4d_offset(pgd, relocated_restore_code);
        if (p4d_large(*p4d)) {
                set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
                goto out;
        }
        pud = pud_offset(p4d, relocated_restore_code);
        if (pud_large(*pud)) {
                set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
                goto out;
        }
        pmd = pmd_offset(pud, relocated_restore_code);
        if (pmd_large(*pmd)) {
                set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
                goto out;
        }
        pte = pte_offset_kernel(pmd, relocated_restore_code);
        set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
        __flush_tlb_all();
        return 0;
}

int swsusp_arch_resume(void)
{
        int error;

        /* We have got enough memory and from now on we cannot recover */
        error = set_up_temporary_mappings();
        if (error)
                return error;

        error = relocate_restore_code();
        if (error)
                return error;

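        /*
         * If everything goes well, restore_image() does not return: execution
         * continues in the restored image kernel.
         */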
        restore_image();
        return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

#define MD5_DIGEST_SIZE 16

struct restore_data_record {
        unsigned long jump_address;
        unsigned long jump_address_phys;
        unsigned long cr3;
        unsigned long magic;
        u8 e820_digest[MD5_DIGEST_SIZE];
};

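/*
 * Arbitrary version magic checked by arch_hibernation_header_restore();
 * it is bumped whenever the layout of struct restore_data_record changes.
 */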
#define RESTORE_MAGIC   0x23456789ABCDEF01UL

#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
 * get_e820_md5 - calculate the MD5 digest of a given e820 table
 *
 * @table: the e820 table to be hashed
 * @buf: buffer to store the resulting digest in
 */
static int get_e820_md5(struct e820_table *table, void *buf)
{
        struct scatterlist sg;
        struct crypto_ahash *tfm;
        int size;
        int ret = 0;

        tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return -ENOMEM;

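        /*
         * AHASH_REQUEST_ON_STACK() sizes the on-stack request buffer from
         * @tfm, so it can only be declared after the transform has been
         * allocated; the inner block makes the mid-function declaration legal.
         */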
        {
                AHASH_REQUEST_ON_STACK(req, tfm);

                size = offsetof(struct e820_table, entries) +
                        sizeof(struct e820_entry) * table->nr_entries;
                ahash_request_set_tfm(req, tfm);
                sg_init_one(&sg, (u8 *)table, size);
                ahash_request_set_callback(req, 0, NULL, NULL);
                ahash_request_set_crypt(req, &sg, buf, size);

                if (crypto_ahash_digest(req))
                        ret = -EINVAL;
                ahash_request_zero(req);
        }
        crypto_free_ahash(tfm);

        return ret;
}

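/*
 * The digest is computed over e820_table_firmware, i.e. the memory map as
 * the firmware reported it at boot, so the comparison is not affected by
 * later kernel-side modifications of the e820 table.
 */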
static void hibernation_e820_save(void *buf)
{
        get_e820_md5(e820_table_firmware, buf);
}

static bool hibernation_e820_mismatch(void *buf)
{
        int ret;
        u8 result[MD5_DIGEST_SIZE];

        memset(result, 0, MD5_DIGEST_SIZE);
        /* If the suspend kernel stored no digest, let the restore proceed. */
        if (!memcmp(result, buf, MD5_DIGEST_SIZE))
                return false;

        ret = get_e820_md5(e820_table_firmware, result);
        if (ret)
                return true;

        return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
}
#else
static void hibernation_e820_save(void *buf)
{
}

static bool hibernation_e820_mismatch(void *buf)
{
        /* If MD5 is not built into the restore kernel, let the restore proceed. */
        return false;
}
#endif

/**
 * arch_hibernation_header_save - populate the architecture specific part
 *      of a hibernation image header
 * @addr: address to save the data at
 * @max_size: maximum size of the header space available at @addr
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
        struct restore_data_record *rdr = addr;

        if (max_size < sizeof(struct restore_data_record))
                return -EOVERFLOW;

        rdr->jump_address = (unsigned long)&restore_registers;
        rdr->jump_address_phys = __pa_symbol(&restore_registers);
        rdr->cr3 = restore_cr3;
        rdr->magic = RESTORE_MAGIC;

        hibernation_e820_save(rdr->e820_digest);

        return 0;
}

/**
 * arch_hibernation_header_restore - read the architecture specific data
 *      from the hibernation image header
 * @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
        struct restore_data_record *rdr = addr;

        restore_jump_address = rdr->jump_address;
        jump_address_phys = rdr->jump_address_phys;
        restore_cr3 = rdr->cr3;

        if (rdr->magic != RESTORE_MAGIC) {
                pr_crit("Unrecognized hibernate image header format!\n");
                return -EINVAL;
        }

        if (hibernation_e820_mismatch(rdr->e820_digest)) {
                pr_crit("Hibernate inconsistent memory map detected!\n");
                return -ENODEV;
        }

        return 0;
}