#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
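
/*
 * Illustrative note (not from the original header): a typical use of
 * pgprot_noncached() is a driver's mmap() handler mapping device memory
 * uncached; the helper name my_dev_mmap() below is hypothetical:
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */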

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#define pmd_update(mm, addr, ptep)		do { } while (0)
#define pmd_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	/*
	 * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
	 * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
	 * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
	 */
	return (pte_flags(pte) & _PAGE_SPECIAL) &&
		(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline int pte_file_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
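
/*
 * Illustrative note (not from the original header): generic code reaches for
 * pte_modify() when it changes protections but wants to keep the page frame
 * number and the dirty/accessed state, roughly like this sketch (mm, addr,
 * ptep and newprot are hypothetical locals):
 *
 *	pte_t old = *ptep;
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 */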

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
			       _PAGE_NUMA);
}

#define pte_present_nonuma pte_present_nonuma
static inline int pte_present_nonuma(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
				 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
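
/*
 * Illustrative note (not from the original header): with 64-bit page tables
 * PMD_SHIFT is 21 and PTRS_PER_PMD is 512, so pmd_index() simply extracts
 * bits 21..29 of the virtual address.
 */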

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
		return 0;
#endif
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
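
/*
 * Illustrative sketch (not from the original header): how these helpers
 * compose into a software page-table walk.  The function name
 * example_walk_va() is hypothetical and assumes a 4-level, non-folded
 * layout plus a valid, mapped address:
 *
 *	static pte_t *example_walk_va(struct mm_struct *mm, unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		pud_t *pud = pud_offset(pgd, addr);
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */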

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
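
/*
 * Illustrative note (not from the original header): KERNEL_PGD_BOUNDARY is
 * the pgd slot where the kernel half of the address space begins
 * (pgd_index(PAGE_OFFSET)), and KERNEL_PGD_PTRS is the number of pgd
 * entries above it; together they describe the kernel range that is
 * replicated into every process pgd.
 */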

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}
770 static inline pte_t
ptep_get_and_clear_full(struct mm_struct
*mm
,
771 unsigned long addr
, pte_t
*ptep
,
777 * Full address destruction in progress; paravirt does not
778 * care about updates and native needs no locking
780 pte
= native_local_ptep_get_and_clear(ptep
);
782 pte
= ptep_get_and_clear(mm
, addr
, ptep
);

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
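
/*
 * Illustrative note (not from the original header): a typical caller copies
 * the kernel portion of a reference pgd into a freshly allocated one
 * (new_pgd is a hypothetical local), roughly:
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */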

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
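
/*
 * Illustrative note (not from the original header): with PAGE_SHIFT == 12 and
 * PTE_SHIFT == 9, page_level_shift() yields 12 for PG_LEVEL_4K, 21 for
 * PG_LEVEL_2M and 30 for PG_LEVEL_1G, so page_level_size() gives 4 KiB,
 * 2 MiB and 1 GiB respectively.
 */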

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */