// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/unicore32/kernel/hibernate.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
 * Copyright (C) 2001-2010 Guan Xuetao
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>

#include <asm/page.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/suspend.h>

#include "mach/pm.h"

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

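/*
 * Register snapshot for CPU 0; saved and restored by the low-level
 * assembly suspend/resume code rather than by the C helpers below.
 */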
struct swsusp_arch_regs swsusp_arch_regs_cpu0;

/*
 * Return the middle page table for the given global directory entry.
 * With UniCore32's folded (two-level) page-table layout, no allocation
 * is needed: the p4d/pud/pmd offsets all collapse back to the pgd entry.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	p4d_t *p4d;
	pmd_t *pmd_table;

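	/*
	 * Walk the folded p4d/pud levels; with a two-level layout these
	 * offsets all resolve back to the pgd entry itself.
	 */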
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

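		/*
		 * Hook the new table into the pmd entry; page tables hold
		 * physical addresses, hence the __pa() conversion.
		 */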
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE));

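		/* The entry just installed must map back to the same table. */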
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

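	/*
	 * Start at the first kernel-space pgd slot; pfn 0 is the first
	 * physical page, which the linear map places at PAGE_OFFSET.
	 */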
	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

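		/* Past the end of lowmem, leave the slot without ptes. */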
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			pte_t *max_pte;

			if (pfn >= max_low_pfn)
				break;

			/* Map with normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			pte = resume_one_page_table_init(pmd);
			if (!pte)
				return -ENOMEM;

			max_pte = pte + PTRS_PER_PTE;
			for (; pte < max_pte; pte++, pfn++) {
				if (pfn >= max_low_pfn)
					break;

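				/* Linear kernel mapping, marked executable. */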
				set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
			}
		}
	}

	return 0;
}

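/*
 * Nothing to do here: the pgd page comes from get_safe_page(), which
 * returns an already-zeroed page, and no fixed first-level entries are
 * needed for resume on this architecture.
 */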
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
}

int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

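	/* Build a temporary linear mapping out of resume-safe pages. */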
	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

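	/*
	 * restore_image() switches to the temporary page tables and copies
	 * the image pages back into place; on success execution continues
	 * in the restored kernel instead of returning here.
	 */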
	/* We have got enough memory and from now on we cannot recover */
	restore_image(resume_pg_dir, restore_pblist);
	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
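	/* Round the end up so a partially used last page is included. */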
	unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= begin_pfn) && (pfn < end_pfn);
}

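/*
 * The register state itself is captured in swsusp_arch_regs_cpu0 by the
 * low-level suspend path, so there is nothing extra to save here.
 */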
void save_processor_state(void)
{
}

void restore_processor_state(void)
{
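	/* Drop stale translations left over from the image kernel. */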
	local_flush_tlb_all();
}