#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == 0;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) & _PAGE_PRESENT;
}
/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
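
/*
 * Editor's illustration (not part of the original header): why the
 * high-word-first store order in native_set_pte() matters.  A
 * hardware page walk only considers the entry once the present bit
 * in pte_low is set, so publishing pte_high first means a concurrent
 * walker can never see a present entry whose top half is stale.
 * With the stores reversed, this interleaving would translate
 * through a torn entry:
 *
 *	writer (reversed order)		concurrent hardware walk
 *	ptep->pte_low = pte.pte_low;
 *					reads pte_low  (present, new)
 *					reads pte_high (still the old frame!)
 *	ptep->pte_high = pte.pte_high;
 *
 * The smp_wmb() keeps the compiler (and, on weaker architectures,
 * the CPU) from reordering the two halves of the store.
 */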

/*
 * Since this is only called on user PTEs, and the page fault handler
 * must handle the already racy situation of simultaneous page faults,
 * we are justified in merely clearing the PTE present bit, followed
 * by a set.  The ordering here is important.
 */
static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
					  pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
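
/*
 * Editor's note (not part of the original header): the three stores
 * above give a racing fault handler only two possible views of the
 * entry, never a mix of old and new halves:
 *
 *	ptep->pte_low = 0;		entry not present -> walker faults
 *	ptep->pte_high = pte.pte_high;	still not present -> walker faults
 *	ptep->pte_low = pte.pte_low;	entry becomes fully new in one store
 *
 * Any fault raised inside the window is resolved by the page fault
 * handler re-reading the now-consistent entry.
 */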

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
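
/*
 * Editor's sketch (not part of the original header) of the idea
 * behind the set_64bit() calls above: on PAE-capable CPUs a
 * cmpxchg8b-style primitive replaces the whole 64-bit entry in one
 * atomic operation, so no observer can ever see a half-written
 * pmd/pud.  This illustrative helper uses a GCC builtin and is not
 * the kernel's actual implementation.
 */
static inline void example_set_64bit(unsigned long long *ptr,
				     unsigned long long val)
{
	unsigned long long old = *ptr;

	/* retry until all 64 bits are swapped in a single atomic step */
	while (!__sync_bool_compare_and_swap(ptr, old, val))
		old = *ptr;
}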

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}
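
/*
 * Editor's note (not part of the original header): the clear order is
 * the mirror image of the set order in native_set_pte().  Setting
 * writes the high word first and makes the entry live with the final
 * low-word store; clearing kills the present bit with the first
 * low-word store.  Either way, a concurrent walker sees the entry as
 * not-present or fully valid, never as a mix of halves.
 */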

static inline void pud_clear(pud_t *pudp)
{
	unsigned long pgd;

	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Make sure the pud entry we're updating is within the
	 * current pgd to avoid unnecessary TLB flushes.
	 */
	pgd = read_cr3();
	if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
		write_cr3(pgd);
}
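
/*
 * Editor's note (not part of the original header): in PAE mode the
 * top level is a 4-entry page-directory-pointer table (PTRS_PER_PGD
 * == 4, 8 bytes per entry), so the range check above covers the 32
 * bytes that cr3 currently points at.  Rewriting cr3 with its own
 * value flushes the non-global TLB entries, which is what the Intel
 * note requires after changing a top-level entry.
 */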

#define pud_page(pud) \
	((struct page *) __va(pud_val(pud) & PAGE_MASK))

#define pud_page_vaddr(pud) \
	((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
				  pmd_index(address))

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
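
/*
 * Editor's example (not part of the original header): how the rule
 * from the set_pte comment above plays out.  To replace a PTE that
 * may be live, first take the old value out atomically, then install
 * the new one while the entry is not present.  This helper is
 * hypothetical and exists only to illustrate the calling pattern.
 */
static inline pte_t example_replace_pte(pte_t *ptep, pte_t pte)
{
	/* xchg on the low word clears the present bit atomically */
	pte_t old = native_ptep_get_and_clear(ptep);

	/* entry is now not present, so a plain ordered set is safe */
	native_set_pte(ptep, pte);

	/* old value lets the caller preserve dirty/accessed state */
	return old;
}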

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS	32
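
/*
 * Editor's example (not part of the original header): a nonlinear
 * file pte carries the full 32-bit page offset in the high word while
 * the low word holds only _PAGE_FILE, so the present bit stays clear
 * and the hardware ignores the entry:
 *
 *	pte_t pte = pgoff_to_pte(0x12345678UL);
 *	// pte.pte_low  == _PAGE_FILE
 *	// pte.pte_high == 0x12345678
 *	// pte_to_pgoff(pte) == 0x12345678
 */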

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
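
/*
 * Editor's example (not part of the original header): the swap type
 * lives in bits 0-4 (so up to 32 swap areas) and the offset in the
 * remaining bits, all packed into the unused high word of a
 * non-present pte:
 *
 *	swp_entry_t e = __swp_entry(3, 100);	// e.val == (100 << 5) | 3
 *	// __swp_type(e)   == 3
 *	// __swp_offset(e) == 100
 *	pte_t pte = __swp_entry_to_pte(e);	// pte_high == e.val, pte_low == 0
 */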

#endif /* _I386_PGTABLE_3LEVEL_H */