// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 * https://lkml.org/lkml/2010/6/18/4
 * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 * https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t ttbr1_el1;
	void (*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t __hyp_stub_vectors;

	u64 sleep_cpu_mpidr;
} resume_hdr;
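
/* Fill the invariants (currently just the utsname version string). */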
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}
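
/*
 * A pfn is "nosave" if it lies in the kernel's __nosave section or belongs
 * to the crash dump kernel's image.
 */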
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}
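
/*
 * Nothing to save/restore here: the CPU context is captured by
 * __cpu_suspend_enter() in swsusp_arch_suspend(). Just check that the
 * secondary CPUs are already offline.
 */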
void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel = _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
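
/*
 * Map a single page into the temporary page tables at dst_addr, allocating
 * any missing intermediate levels from the hibernate safe-page pool.
 */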
static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
			      unsigned long dst_addr,
			      pgprot_t pgprot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
	if (pgd_none(READ_ONCE(*pgdp))) {
		pudp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pudp)
			return -ENOMEM;
		pgd_populate(&init_mm, pgdp, pudp);
	}

	p4dp = p4d_offset(pgdp, dst_addr);
	if (p4d_none(READ_ONCE(*p4dp))) {
		pudp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pudp)
			return -ENOMEM;
		p4d_populate(&init_mm, p4dp, pudp);
	}

	pudp = pud_offset(p4dp, dst_addr);
	if (pud_none(READ_ONCE(*pudp))) {
		pmdp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pmdp)
			return -ENOMEM;
		pud_populate(&init_mm, pudp, pmdp);
	}

	pmdp = pmd_offset(pudp, dst_addr);
	if (pmd_none(READ_ONCE(*pmdp))) {
		ptep = (void *)get_safe_page(GFP_ATOMIC);
		if (!ptep)
			return -ENOMEM;
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	ptep = pte_offset_kernel(pmdp, dst_addr);
	set_pte(ptep, pfn_pte(virt_to_pfn(page), pgprot));

	return 0;
}

/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps the page at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr)
{
	void *page = (void *)get_safe_page(GFP_ATOMIC);
	pgd_t *trans_pgd;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
	__flush_icache_range((unsigned long)page, (unsigned long)page + length);

	trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
	if (!trans_pgd)
		return -ENOMEM;

	rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
				PAGE_KERNEL_EXEC);
	if (rc)
		return rc;

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys(page);

	return 0;
}

#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))
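
/*
 * On the way down, save the CPU context and snapshot memory with
 * swsusp_save(). On the way back up (re-entering the else branch below via
 * the context restored by the boot kernel), clean the code that runs with
 * the MMU off to PoC and re-enable the Spectre-v4 mitigation.
 */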
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();

	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);

		/* Clean kvm setup code to PoC */
		if (el2_reset_needed()) {
			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
			dcache_clean_range(__hyp_text_start, __hyp_text_end);
		}

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory.
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

		/*
		 * Just in case the boot kernel did turn the SSBD
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		spectre_v4_enable_mitigation(NULL);
	}

	local_daif_restore(flags);

	return ret;
}
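
/*
 * Copy one pte into the temporary page tables, clearing RDONLY so that
 * resume can write through it.
 */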
static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = READ_ONCE(*src_ptep);

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite(pte));
	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
		/*
		 * debug_pagealloc will remove the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Before marking this entry valid, check that the pfn
		 * should be mapped.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));

		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
	}
}
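
/* Copy the pte level for [start, end), allocating the destination table. */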
static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
		    unsigned long end)
{
	pte_t *src_ptep;
	pte_t *dst_ptep;
	unsigned long addr = start;

	dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_ptep)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
	dst_ptep = pte_offset_kernel(dst_pmdp, start);

	src_ptep = pte_offset_kernel(src_pmdp, start);
	do {
		_copy_pte(dst_ptep, src_ptep, addr);
	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);

	return 0;
}
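
/* Copy the pmd level, clearing RDONLY on any section (block) mappings. */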
static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmdp)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pudp, dst_pmdp);
	}
	dst_pmdp = pmd_offset(dst_pudp, start);

	src_pmdp = pmd_offset(src_pudp, start);
	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			continue;
		if (pmd_table(pmd)) {
			if (copy_pte(dst_pmdp, src_pmdp, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmdp,
				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);

	return 0;
}
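
/* Copy the pud level, clearing RDONLY on any section (block) mappings. */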
static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pudp;
	pud_t *src_pudp;
	unsigned long next;
	unsigned long addr = start;

	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pudp)
			return -ENOMEM;
		p4d_populate(&init_mm, dst_p4dp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_p4dp, start);

	src_pudp = pud_offset(src_p4dp, start);
	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			continue;
		if (pud_table(pud)) {
			if (copy_pmd(dst_pudp, src_pudp, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pudp,
				__pud(pud_val(pud) & ~PUD_SECT_RDONLY));
		}
	} while (dst_pudp++, src_pudp++, addr = next, addr != end);

	return 0;
}
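
/* Copy the p4d level for [start, end). */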
static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
		    unsigned long end)
{
	p4d_t *dst_p4dp;
	p4d_t *src_p4dp;
	unsigned long next;
	unsigned long addr = start;

	dst_p4dp = p4d_offset(dst_pgdp, start);
	src_p4dp = p4d_offset(src_pgdp, start);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(READ_ONCE(*src_p4dp)))
			continue;
		if (copy_pud(dst_p4dp, src_p4dp, addr, next))
			return -ENOMEM;
	} while (dst_p4dp++, src_p4dp++, addr = next, addr != end);

	return 0;
}
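
/* Duplicate the kernel page tables for [start, end) into dst_pgdp. */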
static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgdp = pgd_offset_k(start);

	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(READ_ONCE(*src_pgdp)))
			continue;
		if (copy_p4d(dst_pgdp, src_pgdp, addr, next))
			return -ENOMEM;
	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);

	return 0;
}
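
/*
 * Allocate a fresh pgd from the safe-page pool and copy the kernel page
 * tables for [start, end) into it.
 */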
static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
				 unsigned long end)
{
	int rc;
	pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);

	if (!trans_pgd) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		return -ENOMEM;
	}

	rc = copy_page_tables(trans_pgd, start, end);
	if (!rc)
		*dst_pgdp = trans_pgd;

	return rc;
}

/*
 * Set up and resume from the hibernate image using swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate code,
 * we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
	if (rc)
		return rc;

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;	/* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

	return 0;
}
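
/*
 * The image was created on sleep_cpu, and must be resumed on that same CPU:
 * offline every other CPU instead of the usual "all nonboot CPUs".
 */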
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}
607}