/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

#include <asm/atomic64_32.h>

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by gcc. The problem is that in certain places
 * where pte_offset_map_lock() is called, concurrent page faults are
 * allowed if the mmap_sem is held for reading. An example is mincore
 * vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64 bits of the pmd atomically. To fix
 * this, all places running pte_offset_map_lock() while holding the
 * mmap_sem in read mode shall read the pmdp pointer using this
 * function to know if the pmd is null or not, and in turn to know if
 * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
 * operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs. So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic(). We could read it really
 * atomically here with an atomic64_read() for the THP-enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found stable (i.e. pointing to a pte). And we return a none
 * pmdval if the low part of the pmd is none. In some cases the high
 * and low parts of the pmdval returned may not be consistent if THP is
 * enabled (the low part may point to a previously mapped hugepage,
 * while the high part may point to a more recently mapped hugepage),
 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
 * of the pmd to be read atomically to decide if the pmd is unstable
 * or not, with the only exception of when the low part of the pmd is
 * zero, in which case we return a none pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}

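/*
 * Illustrative caller pattern (a sketch only, not part of this header's
 * API): a lockless reader that holds mmap_sem for read snapshots the pmd
 * with pmd_read_atomic() and only walks down to the pte level once the
 * low word has been seen to be stable, roughly:
 *
 *	pmd_t pmdval = pmd_read_atomic(pmdp);
 *	barrier();	// keep gcc from re-reading *pmdp piecemeal below
 *	if (pmd_none(pmdval))
 *		return 0;	// nothing mapped here (yet)
 *	if (pmd_trans_huge(pmdval))
 *		...;		// huge pmd: do not descend to the pte level
 *	pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 *
 * This mirrors what the generic pmd_none_or_trans_huge_or_clear_bad()
 * helper does; the surrounding variable names above are only for
 * illustration.
 */
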
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
#endif
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to the Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called either have
	 * flush_tlb_mm() called afterwards or don't need a TLB flush
	 * (x86_64 code or pud_clear_bad()), so we don't need a TLB flush
	 * here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};

#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	/*
	 * If pmd has present bit cleared we can get away without expensive
	 * cmpxchg64: we can update pmdp half-by-half without racing with
	 * anybody.
	 */
	if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
		union split_pmd old, new, *ptr;

		ptr = (union split_pmd *)pmdp;

		new.pmd = pmd;

		/* xchg acts as a barrier before setting of the high bits */
		old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
		old.pmd_high = ptr->pmd_high;
		ptr->pmd_high = new.pmd_high;
		return old.pmd;
	}

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);

	return old;
}
#endif

#ifdef CONFIG_SMP
union split_pud {
	struct {
		u32 pud_low;
		u32 pud_high;
	};
	pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	union split_pud res, *orig = (union split_pud *)pudp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
#endif

	/* xchg acts as a barrier before setting of the high bits */
	res.pud_low = xchg(&orig->pud_low, 0);
	res.pud_high = orig->pud_high;
	orig->pud_high = 0;

	return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})

/*
 * Normally, __swp_entry() converts from the arch-independent swp_entry_t to
 * the arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the
 * result in the pte. But here we have a 32-bit swp_entry_t and a 64-bit pte,
 * and need to use the whole 64 bits. Thus, we shift the "real" arch-dependent
 * conversion to __swp_entry_to_pte() through the following helper macro based
 * on the 64-bit __swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
/*
 * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64-bit to the 32-bit
 * intermediate representation, using the following macros based on the
 * 64-bit __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
					     __pteval_swp_offset(pte)))

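/*
 * Worked example of the resulting swap-pte layout (a sketch; it assumes
 * _PAGE_BIT_PROTNONE == 8, which gives SWP_OFFSET_FIRST_BIT == 9 and
 * SWP_OFFSET_SHIFT == 14):
 *
 *	bit  0		_PAGE_PRESENT, always 0 for a swap pte
 *	bits 1..8	left clear by this encoding
 *	bits 9..58	low 50 bits of the *inverted* swap offset
 *	bits 59..63	swap type (SWP_TYPE_BITS == 5)
 *
 * The "shift all the way up, then down" in __swp_pteval_entry() first
 * discards offset bits that cannot fit below the type field and then
 * lands the remaining bits at SWP_OFFSET_FIRST_BIT. Storing ~offset
 * rather than offset keeps the address-looking bits of a non-present
 * pte mostly set, as part of the L1TF swap-entry hardening (see also
 * asm/pgtable-invert.h, included at the end of this file, which handles
 * the analogous inversion for PROT_NONE ptes).
 */
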
#define gup_get_pte gup_get_pte
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking
 * any locks.  For this we would like to load the pointers atomically,
 * but that is not possible (without expensive cmpxchg8b) on PAE.  What
 * we do have is the guarantee that a PTE will only either go from not
 * present to present, or present to not present or both -- it will not
 * switch to a completely different present page without a TLB flush in
 * between; something that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *	ptep->pte_high = h;
 *	smp_wmb();
 *	ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *	ptep->pte_low = 0;
 *	smp_wmb();
 *	ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' iff pte_high
 * sees 'h'.  We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high.  *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high.  We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless*
 * it is 'l'.  Because get_user_pages_fast() only operates on present ptes
 * we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */