/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte. In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it. -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
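
/*
 * native_set_pte() stores the high half before the low half so that a
 * lockless reader such as gup_get_pte() below, which loads pte_low before
 * pte_high, never observes a present low half paired with a stale high
 * half.
 */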

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by gcc. The problem is that, in certain
 * places where pte_offset_map_lock() is called, concurrent page faults
 * are allowed if the mmap_sem is held for reading. An example is mincore
 * vs. page faults vs. MADV_DONTNEED. On the page fault side,
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64 bits of the pmd atomically. To fix
 * this, all places running pte_offset_map_lock() while holding the
 * mmap_sem in read mode shall read the pmdp pointer using this
 * function to know if the pmd is null or not, and in turn to know if
 * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
 * operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs. So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic(). We could read it truly
 * atomically here with an atomic64_read() for the THP-enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found to be stable (i.e. pointing to a pte), and we return a none
 * pmdval if the low part of the pmd is none. In some cases the high
 * and low parts of the pmdval returned may not be consistent if THP is
 * enabled (the low part may point to a previously mapped hugepage,
 * while the high part may point to a more recently mapped hugepage),
 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
 * of the pmd to be read atomically to decide if the pmd is unstable
 * or not, with the only exception being when the low part of the pmd is
 * zero, in which case we return a none pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
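
/*
 * Typical lockless use (illustrative sketch, not code from this header):
 * a reader holding only the mmap_sem for read would do
 *
 *	pmd_t pmdval = pmd_read_atomic(pmdp);
 *
 *	if (pmd_none(pmdval))
 *		return 0;
 *
 * rather than dereference *pmdp directly, which gcc may compile into two
 * separate 32-bit loads.
 */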

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
#endif
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently, all places where pud_clear() is called are either
	 * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
	 * code or pud_clear_bad()), so we don't need a TLB flush here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
union split_pud {
	struct {
		u32 pud_low;
		u32 pud_high;
	};
	pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	union split_pud res, *orig = (union split_pud *)pudp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
#endif

	/* xchg acts as a barrier before setting of the high bits */
	res.pud_low = xchg(&orig->pud_low, 0);
	res.pud_high = orig->pud_high;
	orig->pud_high = 0;

	return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})

/*
 * Normally, __swp_entry() converts from arch-independent swp_entry_t to
 * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the
 * result to pte. But here we have a 32-bit swp_entry_t and a 64-bit pte,
 * and need to use the whole 64 bits. Thus, we shift the "real"
 * arch-dependent conversion to __swp_entry_to_pte() through the following
 * helper macro based on a 64-bit __swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x) ((pte_t){ .pte = \
		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
/*
 * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64-bit to the 32-bit
 * intermediate representation, using the following macros based on the
 * 64-bit __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
					     __pteval_swp_offset(pte)))
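
/*
 * Worked example of the resulting layout (illustrative; assumes
 * _PAGE_BIT_PROTNONE == 8, so SWP_OFFSET_FIRST_BIT == 9 and
 * SWP_OFFSET_SHIFT == 14):
 *
 *	__swp_pteval_entry(type, offset)
 *		== (~(pteval_t)(offset) << 14 >> 5) | ((pteval_t)(type) << 59)
 *
 * The five type bits land in pte bits 63..59, and the offset is stored
 * bit-inverted starting at bit SWP_OFFSET_FIRST_BIT, leaving the low flag
 * bits (including the present bit) clear, so the resulting pte is
 * non-present. Decoding reverses this: __pteval_swp_type() is pte >> 59,
 * and __pteval_swp_offset() computes ~pte << 5 >> 14 to recover the
 * original offset.
 */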

#define gup_get_pte gup_get_pte
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking
 * any locks. For this we would like to load the pointers atomically,
 * but that is not possible (without expensive cmpxchg8b) on PAE. What
 * we do have is the guarantee that a PTE will only either go from not
 * present to present, or present to not present or both -- it will not
 * switch to a completely different present page without a TLB flush in
 * between; something that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *	ptep->pte_high = h;
 *	smp_wmb();
 *	ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *	ptep->pte_low = 0;
 *	smp_wmb();
 *	ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' iff pte_high
 * sees 'h'. We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high. *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high. We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless*
 * it is 'l'. Because get_user_pages_fast() only operates on present ptes
 * we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */