/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *	https://lkml.org/lkml/2010/6/18/4
 *	https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 *	https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * License terms: GNU General Public License (GPL) version 2
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/irqflags.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;
/* Find a symbol's alias in the linear map */
#define LMADDR(x)	phys_to_virt(virt_to_phys(x))
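
/*
 * e.g. LMADDR(restore_pblist) gives the address of restore_pblist via the
 * linear map; only the linear map is duplicated into the temporary page
 * tables used while the image is restored.
 */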
/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
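
/*
 * EL2 needs resetting when we booted at EL2 but run the kernel at EL1
 * (non-VHE): the EL2 state is not preserved in the image, so resume must
 * re-install the hyp-stub via the copy in __hibernate_exit_text.
 */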
/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];
/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;
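
/*
 * sleep_cpu is recorded in swsusp_arch_suspend() on the way down and
 * recovered from the image header in arch_hibernation_header_restore(),
 * so that resume can be steered back to the same logical CPU.
 */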
/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * configuration.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};
/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;

	u64		sleep_cpu_mpidr;
} resume_hdr;
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}
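
/*
 * Report whether a pfn lies in this kernel's nosave region (bounded by the
 * __nosave_begin/__nosave_end linker symbols), so the hibernate core can
 * skip it when writing and loading the image.
 */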
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);

	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
}
void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}
void notrace restore_processor_state(void)
{
}
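
/*
 * Fill in the arch-specific header saved with the hibernate image. The
 * resume kernel checks it in arch_hibernation_header_restore() before any
 * memory is overwritten.
 */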
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= virt_to_phys(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);
int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}
	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			sleep_cpu = -EINVAL;
			return ret;
		}
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps it at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
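/*
 * The page-table allocator is passed in by the caller: swsusp_arch_resume()
 * passes get_safe_page(), so none of these pages are clobbered while the
 * image is restored.
 */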
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr,
				 void *(*allocator)(gfp_t mask),
				 gfp_t mask)
{
	int rc = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long dst = (unsigned long)allocator(mask);

	if (!dst) {
		rc = -ENOMEM;
		goto out;
	}
	memcpy((void *)dst, src_start, length);
	flush_icache_range(dst, dst + length);
	pgd = pgd_offset_raw(allocator(mask), dst_addr);
	if (pgd_none(*pgd)) {
		pud = allocator(mask);
		if (!pud) {
			rc = -ENOMEM;
			goto out;
		}
		pgd_populate(&init_mm, pgd, pud);
	}

	pud = pud_offset(pgd, dst_addr);
	if (pud_none(*pud)) {
		pmd = allocator(mask);
		if (!pmd) {
			rc = -ENOMEM;
			goto out;
		}
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, dst_addr);
	if (pmd_none(*pmd)) {
		pte = allocator(mask);
		if (!pte) {
			rc = -ENOMEM;
			goto out;
		}
		pmd_populate_kernel(&init_mm, pmd, pte);
	}
	pte = pte_offset_kernel(pmd, dst_addr);
	set_pte(pte, __pte(virt_to_phys((void *)dst) |
			 pgprot_val(PAGE_KERNEL_EXEC)));
	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys((void *)dst);

out:
	return rc;
}
#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))
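
/*
 * swsusp_arch_suspend() runs twice across a hibernate/resume cycle:
 * __cpu_suspend_enter() returns non-zero on the initial pass, where we
 * record the CPU and let swsusp_save() write the image; once the image has
 * been restored, execution continues in the else branch, which cleans the
 * MMU-off startup code to PoC and clears in_suspend.
 */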
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	local_dbg_save(flags);

	if (__cpu_suspend_enter(&state)) {
		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed())
			dcache_clean_range(__hyp_idmap_text_start,
					   __hyp_idmap_text_end);

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory.
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();
	}

	local_dbg_restore(flags);

	return ret;
}
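
/*
 * The copy_* helpers below duplicate the kernel's linear-map page tables
 * into pages from get_safe_page(), clearing the read-only bit so that the
 * restore code can write to every page it needs to.
 */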
static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
{
	pte_t pte = *src_pte;

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_pte, pte_clear_rdonly(pte));
	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
		/*
		 * debug_pagealloc will have removed the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Before marking this entry valid, check that the pfn is
		 * backed by memory.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));

		set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte)));
	}
}
static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
		    unsigned long end)
{
	pte_t *src_pte;
	pte_t *dst_pte;
	unsigned long addr = start;

	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_pte)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
	dst_pte = pte_offset_kernel(dst_pmd, start);

	src_pte = pte_offset_kernel(src_pmd, start);
	do {
		_copy_pte(dst_pte, src_pte, addr);
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	return 0;
}
static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmd;
	pmd_t *dst_pmd;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(*dst_pud)) {
		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmd)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pud, dst_pmd);
	}
	dst_pmd = pmd_offset(dst_pud, start);

	src_pmd = pmd_offset(src_pud, start);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*src_pmd))
			continue;
		if (pmd_table(*src_pmd)) {
			if (copy_pte(dst_pmd, src_pmd, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmd,
				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);

	return 0;
}
static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pud;
	pud_t *src_pud;
	unsigned long next;
	unsigned long addr = start;

	if (pgd_none(*dst_pgd)) {
		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pud)
			return -ENOMEM;
		pgd_populate(&init_mm, dst_pgd, dst_pud);
	}
	dst_pud = pud_offset(dst_pgd, start);

	src_pud = pud_offset(src_pgd, start);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*src_pud))
			continue;
		if (pud_table(*(src_pud))) {
			if (copy_pmd(dst_pud, src_pud, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pud,
				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pud++, src_pud++, addr = next, addr != end);

	return 0;
}
static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgd = pgd_offset_k(start);

	dst_pgd = pgd_offset_raw(dst_pgd, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*src_pgd))
			continue;
		if (copy_pud(dst_pgd, src_pgd, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	return 0;
}
/*
 * Set up, then resume from the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * code, we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc = 0;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	void *lm_restore_pblist;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);
	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!tmp_pg_dir) {
		pr_err("Failed to allocate memory for temporary page tables.");
		rc = -ENOMEM;
		goto out;
	}
	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
	if (rc)
		goto out;
	/*
	 * Since we only copied the linear map, we need to find restore_pblist's
	 * linear map address.
	 */
	lm_restore_pblist = LMADDR(restore_pblist);
	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break-before-make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.");
		rc = -ENOMEM;
		goto out;
	}
	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit,
				   (void *)get_safe_page, GFP_ATOMIC);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.");
		goto out;
	}
	/*
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);
	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;	/* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}
	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, lm_restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

out:
	return rc;
}
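
/*
 * The generic hibernate code expects the boot CPU to be online; bail out
 * of PM_HIBERNATION_PREPARE if CPU0 has been hotplugged out.
 */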
static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
					     unsigned long action, void *ptr)
{
	if (action == PM_HIBERNATION_PREPARE &&
	    cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return notifier_from_errno(-ENODEV);
	}

	return NOTIFY_OK;
}
static int __init check_boot_cpu_online_init(void)
{
	/*
	 * Set this pm_notifier callback with a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
	 * called earlier to disable cpu hotplug before the cpu online check.
	 */
	pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);
	return 0;
}
core_initcall(check_boot_cpu_online_init);
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}