// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *  https://lkml.org/lkml/2010/6/18/4
 *  https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 *  https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't hold if either kernel was built with KASLR, as the variable may
 * not be in the same place in both kernels.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;
/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
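/*
 * Note: el2_reset_needed() is true when we booted at EL2 but run the
 * kernel at EL1 (non-VHE). EL2 state is not part of the hibernate image,
 * so it must be re-initialised via the hyp-stub during resume.
 */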
/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];
/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;
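/*
 * sleep_cpu is set on the suspend path in swsusp_arch_suspend() and
 * re-derived from the saved MPIDR value on resume, in
 * arch_hibernation_header_restore().
 */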
/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * configuration.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};
/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;
	u64		sleep_cpu_mpidr;
} resume_hdr;
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}
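/*
 * init_utsname()->version embeds the build number and timestamp (e.g.
 * "#1 SMP PREEMPT Thu Jun 18 09:00:00 UTC 2020"), so an image written by
 * any other build fails the memcmp() in arch_hibernation_header_restore().
 */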
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}
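/*
 * crash_is_nosave() covers the crashkernel reservation: pages there that
 * don't hold the loaded kexec image are useless after resume, so they are
 * not written to the image either.
 */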
void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);
int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret)
		return ret;

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
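/*
 * Logical CPU numbering can differ between the kernel that wrote the
 * image and the kernel resuming it, which is why the header stores the
 * physical MPIDR and get_logical_index() maps it back above.
 */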
static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
		       unsigned long dst_addr,
		       pgprot_t pgprot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
	if (pgd_none(READ_ONCE(*pgdp))) {
		pudp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pudp)
			return -ENOMEM;
		pgd_populate(&init_mm, pgdp, pudp);
	}

	p4dp = p4d_offset(pgdp, dst_addr);
	if (p4d_none(READ_ONCE(*p4dp))) {
		pudp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pudp)
			return -ENOMEM;
		p4d_populate(&init_mm, p4dp, pudp);
	}

	pudp = pud_offset(p4dp, dst_addr);
	if (pud_none(READ_ONCE(*pudp))) {
		pmdp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pmdp)
			return -ENOMEM;
		pud_populate(&init_mm, pudp, pmdp);
	}

	pmdp = pmd_offset(pudp, dst_addr);
	if (pmd_none(READ_ONCE(*pmdp))) {
		ptep = (void *)get_safe_page(GFP_ATOMIC);
		if (!ptep)
			return -ENOMEM;
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	ptep = pte_offset_kernel(pmdp, dst_addr);
	set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));

	return 0;
}
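/*
 * Every missing table level above is allocated with get_safe_page(), so
 * neither the new tables nor the copied page can be overwritten when the
 * image is restored into the old kernel's memory.
 */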
/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps it at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr)
{
	void *page = (void *)get_safe_page(GFP_ATOMIC);
	pgd_t *trans_pgd;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
	__flush_icache_range((unsigned long)page, (unsigned long)page + length);

	trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
	if (!trans_pgd)
		return -ENOMEM;

	rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
				PAGE_KERNEL_EXEC);
	if (rc)
		return rc;

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys(page);

	return 0;
}
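/*
 * The tables installed above map only the single copied page at dst_addr,
 * so the relocation code keeps a usable ttbr0 mapping even while the
 * ttbr1 linear map is being overwritten during restore.
 */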
#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))
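/*
 * dcache_clean_range() cleans to the PoC so that code run with the MMU
 * off sees up-to-date data.
 *
 * In swsusp_arch_suspend() below, __cpu_suspend_enter() behaves like
 * setjmp(): it returns non-zero on the initial call (the path that
 * snapshots memory with swsusp_save()), and zero when the saved context
 * is re-entered through cpu_resume() after the image has been restored.
 */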
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();

	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed()) {
			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
			dcache_clean_range(__hyp_text_start, __hyp_text_end);
		}

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

		/*
		 * Just in case the boot kernel did turn the SSBD
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		switch (arm64_get_ssbd_state()) {
		case ARM64_SSBD_FORCE_ENABLE:
		case ARM64_SSBD_KERNEL:
			arm64_set_ssbd_mitigation(true);
		}
	}

	local_daif_restore(flags);

	return ret;
}
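/*
 * The copy_*() helpers below duplicate the linear map's page tables one
 * level at a time. The copies are made writable (RDONLY cleared) so the
 * restore code can write to every page, including those mapped read-only
 * in the live tables (kernel text, rodata).
 */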
static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = READ_ONCE(*src_ptep);

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite(pte));
	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
		/*
		 * debug_pagealloc will remove the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Before marking this entry valid, check the pfn should
		 * be mapped.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));

		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
	}
}
static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
		    unsigned long end)
{
	pte_t *src_ptep;
	pte_t *dst_ptep;
	unsigned long addr = start;

	dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_ptep)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
	dst_ptep = pte_offset_kernel(dst_pmdp, start);

	src_ptep = pte_offset_kernel(src_pmdp, start);
	do {
		_copy_pte(dst_ptep, src_ptep, addr);
	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);

	return 0;
}
static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmdp)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pudp, dst_pmdp);
	}
	dst_pmdp = pmd_offset(dst_pudp, start);

	src_pmdp = pmd_offset(src_pudp, start);
	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			continue;
		if (pmd_table(pmd)) {
			if (copy_pte(dst_pmdp, src_pmdp, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmdp,
				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);

	return 0;
}
static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pudp;
	pud_t *src_pudp;
	unsigned long next;
	unsigned long addr = start;

	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pudp)
			return -ENOMEM;
		p4d_populate(&init_mm, dst_p4dp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_p4dp, start);

	src_pudp = pud_offset(src_p4dp, start);
	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			continue;
		if (pud_table(pud)) {
			if (copy_pmd(dst_pudp, src_pudp, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pudp,
				__pud(pud_val(pud) & ~PUD_SECT_RDONLY));
		}
	} while (dst_pudp++, src_pudp++, addr = next, addr != end);

	return 0;
}
static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
		    unsigned long end)
{
	p4d_t *dst_p4dp;
	p4d_t *src_p4dp;
	unsigned long next;
	unsigned long addr = start;

	dst_p4dp = p4d_offset(dst_pgdp, start);
	src_p4dp = p4d_offset(src_pgdp, start);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(READ_ONCE(*src_p4dp)))
			continue;
		if (copy_pud(dst_p4dp, src_p4dp, addr, next))
			return -ENOMEM;
	} while (dst_p4dp++, src_p4dp++, addr = next, addr != end);

	return 0;
}
static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgdp = pgd_offset_k(start);

	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(READ_ONCE(*src_pgdp)))
			continue;
		if (copy_p4d(dst_pgdp, src_pgdp, addr, next))
			return -ENOMEM;
	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);

	return 0;
}
static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
				 unsigned long end)
{
	int rc;
	pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);

	if (!trans_pgd) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		return -ENOMEM;
	}

	rc = copy_page_tables(trans_pgd, start, end);
	if (!rc)
		*dst_pgdp = trans_pgd;

	return rc;
}
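/*
 * trans_pgd_create_copy() is used below to build tmp_pg_dir, a copy of
 * the linear map (PAGE_OFFSET..PAGE_END) that stays usable while the
 * real ttbr1 tables are overwritten by the image.
 */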
/*
 * Set up, then resume from the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * core, so we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
	if (rc)
		return rc;

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;	/* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
}
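/*
 * hibernate_exit() never returns: after restoring the image it jumps to
 * the saved reenter_kernel entry point (_cpu_resume in the image kernel).
 *
 * The hibernate core calls the hook below on resume to take the non-boot
 * CPUs offline. Resume must run on the CPU we hibernated on, so rather
 * than assume CPU0, check that sleep_cpu was recovered from the image
 * header and keep exactly that CPU online.
 */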
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}