#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

/*
 * Macros to add or remove the encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
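/*
 * Example (illustrative sketch, compiled out; the helper name is
 * hypothetical): how the pgprot helpers above compose.  A driver mapping a
 * shared device buffer might mark it UC- and strip the SME encryption bit
 * in one go.
 */
#if 0
static inline pgprot_t example_device_prot(void)
{
	pgprot_t prot = PAGE_KERNEL;		/* start from normal kernel prot */

	prot = pgprot_noncached(prot);		/* cache mode -> UC- */
	return pgprot_decrypted(prot);		/* clear the SME C-bit */
}
#endif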
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()	do { } while (0)
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)
#define set_pud_at(mm, addr, pudp, pud)	native_set_pud_at(mm, addr, pudp, pud)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */
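/*
 * Example (illustrative sketch, compiled out): with CONFIG_PARAVIRT
 * disabled, the set_*()/pte_clear() macros above resolve directly to the
 * native_*() helpers, i.e. to plain memory stores.  'ptep' and 'pfn' are
 * hypothetical.
 */
#if 0
static inline void example_map_one(pte_t *ptep, unsigned long pfn)
{
	set_pte(ptep, pfn_pte(pfn, PAGE_KERNEL));	/* -> native_set_pte() */
	pte_clear(NULL, 0, ptep);			/* -> native_pte_clear() */
}
#endif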
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}
static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}
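/*
 * Example (illustrative sketch, compiled out): the *_pfn() accessors mask
 * off the flag bits and shift down to a page frame number, so a PTE's
 * struct page falls out of a two-step composition.  The helper name is
 * hypothetical; pte_page() below packages the same thing.
 */
#if 0
static inline struct page *example_pte_to_page(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);  /* (val & PTE_PFN_MASK) >> PAGE_SHIFT */

	return pfn_to_page(pfn);
}
#endif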
#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
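/*
 * Example (illustrative sketch, compiled out): the pte_mk*()/pte_clr*()
 * helpers are pure functions over the flag bits and compose freely.  This
 * hypothetical helper yields a clean, old, read-only copy of a PTE.
 */
#if 0
static inline pte_t example_pte_ro_clean(pte_t pte)
{
	return pte_wrprotect(pte_mkclean(pte_mkold(pte)));
}
#endif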
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}
static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
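/*
 * Example (illustrative sketch, compiled out): soft-dirty tracking clears
 * the bit, lets the task run, then tests whether a write set it again.
 * The helper is hypothetical and skips the TLB flushing a real user
 * (e.g. the /proc clear_refs code) must do.
 */
#if 0
static inline bool example_was_written(pte_t *ptep)
{
	pte_t pte = *ptep;

	if (!pte_soft_dirty(pte))
		return false;

	set_pte(ptep, pte_clear_soft_dirty(pte));	/* re-arm tracking */
	return true;
}
#endif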
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}
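/*
 * Example (illustrative sketch, compiled out): building an entry from
 * scratch.  pfn_pte() shifts the frame number into place and ORs in the
 * massaged protection bits; mk_pte() further down is exactly this pattern.
 * 'page' is hypothetical.
 */
#if 0
static inline pte_t example_mk_kernel_pte(struct page *page)
{
	return pfn_pte(page_to_pfn(page), PAGE_KERNEL);
}
#endif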
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
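/*
 * Example (illustrative sketch, compiled out): per the rules above, a
 * write-combine request cannot be satisfied by a write-back mapping, so
 * this hypothetical check returns 0.
 */
#if 0
static inline int example_memtype_check(u64 paddr, unsigned long size)
{
	return is_new_memtype_allowed(paddr, size,
				      _PAGE_CACHE_MODE_WC,
				      _PAGE_CACHE_MODE_WB);	/* == 0 */
}
#endif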
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>
static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}
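/*
 * Example (illustrative sketch, compiled out): why pmd_present() also
 * tests _PAGE_PSE.  While split_huge_page() has the present bit
 * temporarily clear, the entry must still count as present; this
 * hypothetical predicate spots that transient state.
 */
#if 0
static inline bool example_pmd_mid_split(pmd_t pmd)
{
	return (pmd_flags(pmd) & _PAGE_PSE) &&
	       !(pmd_flags(pmd) & _PAGE_PRESENT);	/* pmd_present() is still true */
}
#endif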
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */
static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}
#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * x86.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */
/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
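/*
 * Example (illustrative sketch, compiled out): a software walk from a
 * kernel virtual address down to its PTE using the index/offset helpers
 * above.  It assumes every level is present and not huge; a robust walker
 * must check p*d_none()/p*d_bad()/p*d_large() at each step.
 */
#if 0
static inline pte_t *example_lookup_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);
}
#endif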
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr,
				     pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}
#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)	do { } while (0)
#endif
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
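/*
 * Example (illustrative sketch, compiled out): tearing down one user PTE.
 * The _full variant may skip the atomic xchg because the whole address
 * space is going away.  'mm', 'addr', 'ptep' and 'full' are hypothetical.
 */
#if 0
static inline void example_zap_pte(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int full)
{
	pte_t old = ptep_get_and_clear_full(mm, addr, ptep, full);

	/* a real caller would propagate pte_dirty(old)/pte_young(old) here */
	(void)old;
}
#endif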
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
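/*
 * Worked numbers (illustrative): with 4 KiB pages and 512 entries per
 * table, PTE_SHIFT is ilog2(512) = 9, so page_level_shift(PG_LEVEL_2M)
 * is (12 - 9) + 2 * 9 = 21; page_level_size() is then 2 MiB and
 * page_level_mask() is ~(2 MiB - 1).
 */
#if 0
static inline unsigned long example_align_2m(unsigned long addr)
{
	return addr & page_level_mask(PG_LEVEL_2M);	/* round down to 2 MiB */
}
#endif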
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
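/*
 * Worked numbers (illustrative): each pkey owns two adjacent PKRU bits.
 * For pkey 1, pkru_pkey_bits is 2, so AD is bit 2 and WD is bit 3.
 * With pkru == 0x8 (only WD for pkey 1 set), reads are allowed and
 * writes are denied for that key:
 *
 *	__pkru_allows_read(0x8, 1)  -> !(0x8 & (0x1 << 2)) -> true
 *	__pkru_allows_write(0x8, 1) -> !(0x8 & (0x3 << 2)) -> false
 */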
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}
/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */