#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))
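
/*
 * Illustrative output with hypothetical values: a corrupt pte would be
 * reported as, e.g.,
 *
 *	mm/memory.c:123: bad pte c15a3f00(0000000100000067).
 *
 * i.e. the 64-bit PAE entry printed as its high and low 32-bit halves.
 */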

/*
 * Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
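
/*
 * A minimal caller sketch (illustrative, not part of the original
 * file) of the rule above: to update a pte that the hardware may be
 * walking live, first make it not present, then install the new value:
 *
 *	old = native_ptep_get_and_clear(ptep);	(pte now not present)
 *	native_set_pte(ptep, new);		(safe per the rule above)
 */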

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by gcc.  The problem is that in certain
 * places where pte_offset_map_lock is called, concurrent page faults
 * are allowed if the mmap_sem is held for reading.  An example is
 * mincore vs page faults vs MADV_DONTNEED.  On the page fault side
 * pmd_populate rightfully does a set_64bit, but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64 bits of the pmd atomically.  To fix
 * this, all places running pte_offset_map_lock() while holding the
 * mmap_sem in read mode shall read the pmd through this function, to
 * know whether the pmd is null or not, and in turn whether they can
 * run pte_offset_map_lock or pmd_trans_huge or other pmd operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not-null while pmd_read_atomic runs, so
 * there is no need to literally read it atomically.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * THP or null or point to a pte (and in turn become "stable") at any
 * time under pmd_read_atomic, so it's mandatory to read it atomically
 * with cmpxchg8b.
 */
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
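
/*
 * Illustrative interleaving (a sketch, not from the original file) of
 * the torn read the THP variant closes.  A plain "*pmdp" load may be
 * compiled as two 32-bit loads, so a concurrent pmd_populate() can
 * land between them:
 *
 *	reader				writer
 *	load pmd_low  (old value)
 *					set_64bit(pmdp, new value)
 *	load pmd_high (new value)
 *
 * The reader now holds a mixed, never-valid pmd.  atomic64_read() is
 * backed by cmpxchg8b here, so both halves are fetched in one access.
 */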

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}
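
/*
 * set_64bit() is implemented with cmpxchg8b on PAE-capable CPUs, so
 * this setter and the two below publish the whole 64-bit entry as a
 * single atomic store: readers never observe a half-written entry,
 * unlike with the two-part store in native_set_pte() above.
 */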

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page
 * table entry, so clear the bottom half first and enforce ordering
 * with a compiler barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}
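
/*
 * Sketch of why low-half-first matters: clearing the high half first
 * would leave a window where the entry is still present (P-bit set in
 * the low half) but points at a truncated frame number; clearing the
 * low half first means every intermediate state is simply not present.
 */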

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called are either
	 * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
	 * code or pud_clear_bad()), so no TLB flush is needed here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
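
/*
 * Sketch of why the mixed access widths above are safe: the xchg
 * atomically clears the low half, including the P-bit, after which the
 * hardware no longer updates the entry (the accessed and dirty bits
 * live in the low half anyway), so the high half can be read and
 * cleared with plain stores.  Concurrent software updaters are kept
 * out by the page table lock.
 */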
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
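/* Type-punning view of a pmd_t as its two 32-bit halves. */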
union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * so put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS 32
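
/*
 * Worked example with an illustrative offset: pgoff_to_pte(0x1234)
 * builds { .pte_low = _PAGE_FILE, .pte_high = 0x1234 }.  The P-bit in
 * the low word stays clear, so the MMU never walks the entry, while
 * pte_to_pgoff() recovers 0x1234 straight from pte_high.
 */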

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
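
/*
 * Worked example with illustrative values: __swp_entry(3, 0x1000)
 * yields val = 3 | (0x1000 << 5) = 0x20003; __swp_type() masks the low
 * 5 bits back out (3) and __swp_offset() shifts the rest back down
 * (0x1000).  The value is stored entirely in pte_high, leaving the low
 * word, and with it the P-bit, zero.
 */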

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */