/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>
/*
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 * and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
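/*
 * vmemmap is biased by the PFN of the start of physical memory, so that
 * &vmemmap[pfn] resolves to the struct page for that pfn even when RAM
 * does not start at physical address zero.
 */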
#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
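/*
 * A worked example of the 52-bit packing: PA bits [47:12] live in the pte
 * at their natural position (PTE_ADDR_LOW), while PA bits [51:48] are
 * stored in pte bits [15:12] (PTE_ADDR_HIGH). Shifting PTE_ADDR_HIGH left
 * by 36 moves bits [15:12] back up to [51:48]; __phys_to_pte_val() is the
 * inverse, folding bits [51:48] down into [15:12] with a right shift by 36.
 */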
#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check) other than user execute-only which do not have the
 * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}
static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}
static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}
static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}
static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte))
		dsb(ishst);
}
extern void __sync_icache_dcache(pte_t pteval);
/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1          0          0
 *   0      1      |   1          1          0
 *   1      0      |   1          0          1
 *   1      1      |   0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
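/*
 * Worked example of the scheme above: a clean, writable pte starts out
 * with PTE_WRITE and PTE_RDONLY both set (second row). On the first
 * write, hardware DBM clears PTE_RDONLY, so pte_hw_dirty() and hence
 * pte_dirty() become true without a software fault.
 */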
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	pte_t old_pte;

	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	old_pte = READ_ONCE(*ptep);
	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
	   (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(old_pte), pte_val(pte));
		VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(old_pte), pte_val(pte));
	}

	set_pte(ptep, pte);
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;

	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}
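/*
 * PTE_RDONLY is ignored for present ptes because hardware DBM may clear
 * it behind our back: two ptes that differ only in PTE_RDONLY still map
 * the same page with the same logical permissions.
 */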
/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
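/*
 * For example, 4K pages (PAGE_SHIFT == 12, PMD_SHIFT == 21) give
 * HPAGE_SIZE == 2MB and HUGETLB_PAGE_ORDER == 9; 64K pages with a pmd
 * level (PMD_SHIFT == 29) give 512MB huge pages of order 13.
 */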
static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}
static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */
/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)
#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
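/*
 * The three memory types differ in strength: MT_DEVICE_nGnRnE forbids
 * gathering, reordering and early write acknowledgement; MT_DEVICE_nGnRE
 * additionally allows early write acknowledgement; MT_NORMAL_NC is Normal
 * non-cacheable memory on which accesses may be gathered and reordered.
 */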
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif
extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}
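/*
 * This relies on swapper_pg_dir being page-aligned and no larger than a
 * page, so any pointer into it has the same page frame bits as its base.
 */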
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd))
		dsb(ishst);
}
static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}
/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))
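/*
 * For example, with 4K pages pte_index() extracts va bits [20:12], so
 * pte_offset_kernel(pmdp, addr) reads the table address out of *pmdp and
 * indexes the 512-entry pte table through the linear mapping via __va().
 */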
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))
static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud))
		dsb(ishst);
}
static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}
static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}
/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)
#define pud_page(pud)		pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, pgd);
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
}
static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}
static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}
/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))
#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
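/*
 * Putting the per-level helpers together, a full software walk of a kernel
 * virtual address looks roughly like this (a sketch only; real callers must
 * also check p*_none()/p*_bad() and handle block mappings at the pud/pmd
 * levels):
 *
 *	pgd_t *pgdp = pgd_offset_k(addr);
 *	pud_t *pudp = pud_offset(pgdp, addr);
 *	pmd_t *pmdp = pmd_offset(pudp, addr);
 *	pte_t *ptep = pte_offset_kernel(pmdp, addr);
 */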
#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif
/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}
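/*
 * The cmpxchg loop above is needed because the hardware may update the
 * pte concurrently (e.g. DBM clearing PTE_RDONLY on a write); retrying
 * until the compare-and-exchange succeeds clears only the access flag
 * without losing any racing hardware update.
 */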
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
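/*
 * For example, __swp_entry(3, 0x1234) yields (3 << 2) | (0x1234 << 8):
 * bits 0-1 of the resulting entry are clear, so pte_present() is false
 * and the MMU treats the pte as invalid, while the swap location remains
 * recoverable via __swp_type() and __swp_offset().
 */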
/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init
/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)
#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
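/*
 * With 52-bit PAs the TTBR BADDR field cannot hold bits [51:48] in place:
 * they are instead expected in TTBR bits [5:2], and (addr) >> 46 moves
 * bit 48 down to bit 2 (and bit 51 to bit 5) before TTBR_BADDR_MASK_52
 * keeps just the architecturally defined fields.
 */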
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */