arch/arm64/include/asm/pgtable.h
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 *
 * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
 *	(rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel VA space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
 *	fixed mappings and modules
 */
#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)

#ifndef CONFIG_KASAN
#define VMALLOC_START		(VA_START)
#else
#include <asm/kasan.h>
#define VMALLOC_START		(KASAN_SHADOW_END + SZ_64K)
#endif

#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define VMEMMAP_START		(VMALLOC_END + SZ_64K)
#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
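
/*
 * Illustrative note: vmemmap above is pre-biased by the pfn of the start
 * of memory (memstart_addr >> PAGE_SHIFT), so the struct page for any
 * valid pfn is reachable by plain array indexing:
 *
 *	struct page *page = vmemmap + pfn;	// i.e. pfn_to_page(pfn)
 *
 * This is the property the generic SPARSEMEM_VMEMMAP implementations of
 * pfn_to_page()/page_to_pfn() rely on.
 */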

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)

#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
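
/*
 * Illustrative note: the __P{xwr}/__S{xwr} tables above translate the
 * mmap() protection bits (read/write/exec) into pgprots for MAP_PRIVATE
 * (__P*) and MAP_SHARED (__S*) mappings. For example, PROT_READ|PROT_WRITE
 * on a private mapping selects __P011 == PAGE_COPY: the pte stays
 * read-only so that the first write faults and triggers copy-on-write,
 * whereas the shared equivalent __S011 == PAGE_SHARED is directly
 * writable (via PTE_WRITE/DBM).
 */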

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))

#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only synchronise if the new pte is valid and a kernel mapping;
	 * otherwise TLB maintenance or update_mmu_cache() provide the
	 * necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *     0       0     |      1          0          0
 *     0       1     |      1          1          0
 *     1       0     |      1          0          1
 *     1       1     |      0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid(pte)) {
		if (pte_sw_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
		if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
			__sync_icache_dcache(pte, addr);
	}

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
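
/*
 * Illustrative note: with a 4KB granule PMD_SHIFT is 21, so HPAGE_SIZE
 * is 2MB and HUGETLB_PAGE_ORDER is 9 (512 base pages); with a 64KB
 * granule PMD_SHIFT is 29, giving 512MB huge pages.
 */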

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
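
/*
 * Illustrative usage (a sketch, not from this header): drivers typically
 * apply these modifiers to vma->vm_page_prot before mapping device or
 * frame-buffer memory into userspace, e.g.:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * Note that all three variants also set PTE_PXN | PTE_UXN, so the
 * resulting mappings are never executable.
 */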
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#ifdef CONFIG_ARM64_64K_PAGES
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline pud_t *pgd_page_vaddr(pgd_t pgd)
{
	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
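
/*
 * Illustrative note: pte_modify() is what protection changes (e.g. the
 * mprotect() path) apply to an existing pte. Only the bits in 'mask' are
 * taken from the new protection, so the pfn and the memory attribute
 * index are preserved:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);	// drop write permission
 *
 * PTE_DIRTY is deliberately not in 'mask', so software dirty state
 * (including any hardware DBM dirtiness folded in above) survives the
 * protection change.
 */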

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pteval_t pteval;
	unsigned int tmp, res;

	asm volatile("//	ptep_test_and_clear_young\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
	"	and	%0, %0, %4		// clear PTE_AF\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

	return res;
}
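
/*
 * Illustrative note: the ldxr/stxr sequence above is a load-exclusive/
 * store-exclusive read-modify-write loop. If the pte is written
 * concurrently (for instance by the hardware DBM mechanism) between the
 * load and the store, the store-exclusive fails and the loop retries,
 * so the returned PTE_AF value always matches the pte that was actually
 * replaced.
 */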

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pteval_t old_pteval;
	unsigned int tmp;

	asm volatile("//	ptep_get_and_clear\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	stxr	%w1, xzr, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

	return __pte(old_pteval);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	ptep_set_wrprotect\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
	: "cc");
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
#endif	/* CONFIG_ARM64_HW_AFDBM */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
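
/*
 * Worked example: __swp_entry(3, 0x1234) gives
 * (3 << 2) | (0x1234 << 8) == 0x12340c. Bits 0-1 (and bit 58,
 * PTE_PROT_NONE) remain clear, so the resulting pte is !pte_valid()
 * and !pte_present(), and can never be mistaken for a live mapping.
 */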

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */