/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_WRITE		(PTE_DBM)		/* same as DBM (51) */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */

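/*
 * Editor's note (illustration, not from the original header): bits
 * 55-58 of a VMSAv8-64 descriptor are ignored by the MMU, which is why
 * PTE_DIRTY, PTE_SPECIAL and PTE_PROT_NONE can live there as purely
 * software bits, while PTE_WRITE aliases the hardware DBM bit (51) so
 * that software write permission and hardware dirty-bit management
 * agree on a single bit.
 */
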
/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 *
 * VMEMMAP_SIZE: allows the whole VA space to be covered by a struct page array
 *	(rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
 *	fixed mappings and modules
 */
#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)

#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))

#define FIRST_USER_ADDRESS	0UL

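/*
 * Worked example (editor's illustration, not from the original header):
 * with 4KB pages (PAGE_SHIFT == 12), VA_BITS == 48 and a 64-byte
 * struct page -- a common but not guaranteed size -- VMEMMAP_SIZE is
 * (1UL << 36) * 64 bytes == 4TB, carved out below PAGE_OFFSET together
 * with the 64KB guard gap between VMALLOC_END and vmemmap.
 */
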
#ifndef __ASSEMBLY__

#include <asm/fixmap.h>
#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)

#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC

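/*
 * Editor's note (illustration, not from the original header): the three
 * digits are the mmap() protection bits in xwr order, __Pxxx for
 * MAP_PRIVATE and __Sxxx for MAP_SHARED mappings.  For example, a
 * private PROT_READ|PROT_WRITE mapping uses __P011 == PAGE_COPY: the
 * pte is created without PTE_WRITE, so the first store faults and the
 * kernel can copy-on-write; the shared equivalent __S011 == PAGE_SHARED
 * is writable from the start.
 */
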
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))

#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |     1          0          0
 *   0      1      |     1          1          0
 *   1      0      |     1          0          1
 *   1      1      |     0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
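/*
 * Worked example (editor's illustration, not from the original header):
 * a clean, writable pte starts in the second row above (PTE_RDONLY and
 * PTE_WRITE/PTE_DBM both set).  On the first store, a CPU with hardware
 * DBM clears PTE_RDONLY instead of faulting, moving the entry to the
 * last row; pte_hw_dirty() -- and therefore pte_dirty() -- then reports
 * the page dirty even though the software PTE_DIRTY bit was never set.
 */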
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid(pte)) {
		if (pte_sw_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
		if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
			__sync_icache_dcache(pte, addr);
	}

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

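/*
 * Example (editor's illustration, not from the original header): with a
 * 4KB granule, PMD_SHIFT is 21, so HPAGE_SIZE is 2MB and
 * HUGETLB_PAGE_ORDER is 9; with a 64KB granule, PMD_SHIFT is 29 and a
 * huge page covers 512MB.
 */
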
#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

389 | #define pmd_present(pmd) (pmd_val(pmd)) | |
390 | ||
391 | #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) | |
392 | ||
36311607 MZ |
393 | #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ |
394 | PMD_TYPE_TABLE) | |
395 | #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ | |
396 | PMD_TYPE_SECT) | |
397 | ||
f3b766a2 | 398 | #ifdef CONFIG_ARM64_64K_PAGES |
206a2a73 | 399 | #define pud_sect(pud) (0) |
523d6e9f | 400 | #define pud_table(pud) (1) |
206a2a73 SC |
401 | #else |
402 | #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ | |
403 | PUD_TYPE_SECT) | |
523d6e9f | 404 | #define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ |
405 | PUD_TYPE_TABLE) | |
206a2a73 | 406 | #endif |
36311607 | 407 | |
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

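/*
 * Example (editor's illustration, not from the original header): with
 * 4KB pages, PTRS_PER_PTE is 512, so pte_index() selects VA bits
 * [20:12]; pte_index(0xffff000012345000UL) == 0x145, and
 * pte_offset_kernel() turns that index into the virtual address of the
 * corresponding pte_t within the table that the pmd entry points to.
 */
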
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

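/*
 * Example (editor's illustration, not from the original header): with
 * all four levels present, a software walk to the pte that maps a
 * kernel virtual address composes the per-level helpers:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * With fewer translation levels, the folded variants above turn the
 * unused steps into no-ops.
 */
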
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pteval_t pteval;
	unsigned int tmp, res;

	asm volatile("//	ptep_test_and_clear_young\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
	"	and	%0, %0, %4		// clear PTE_AF\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

	return res;
}
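
/*
 * Editor's sketch (illustration, not from the original header): the
 * ldxr/stxr loop above is the atomic equivalent of:
 *
 *	res = !!(pte_val(*ptep) & PTE_AF);
 *	pte_val(*ptep) &= ~PTE_AF;
 *	return res;
 *
 * retried until the store-exclusive succeeds without a concurrent
 * hardware update to the pte.
 */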

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pteval_t old_pteval;
	unsigned int tmp;

	asm volatile("//	ptep_get_and_clear\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	stxr	%w1, xzr, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

	return __pte(old_pteval);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	ptep_set_wrprotect\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
	: "cc");
}
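
/*
 * Editor's sketch (illustration, not from the original header):
 * ignoring atomicity, the exclusive-monitor loop above behaves like:
 *
 *	if (!(pte_val(*ptep) & PTE_RDONLY))	// hardware made it dirty
 *		pte_val(*ptep) |= PTE_DIRTY | PTE_RDONLY;
 *	pte_val(*ptep) &= ~PTE_WRITE;		// also clears PTE_DBM
 *
 * so a pte dirtied by hardware DBM is not reported clean after the
 * write permission has been removed.
 */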

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
#endif /* CONFIG_ARM64_HW_AFDBM */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

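/*
 * Worked example (editor's illustration, not from the original header):
 * __swp_entry(3, 0x1234) yields (3 << 2) | (0x1234 << 8) == 0x12340c;
 * bits 0-1 stay zero so pte_present() is false, and __swp_type()/
 * __swp_offset() recover 3 and 0x1234 from the same value.
 */
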
/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */