/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

/*
 * Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
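
/*
 * Illustrative sketch, not part of the original header: when the old
 * pte may still be live, the rule above means a caller cannot simply
 * set_pte() over it; it first clears the entry and only then installs
 * the new value, e.g. (with ptep_get_and_clear() being the generic
 * wrapper around the native helper defined further below):
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);
 *	... use old, flush the TLB as needed ...
 *	set_pte(ptep, new);
 */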

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by gcc. Problem is, in certain places
 * where pte_offset_map_lock() is called, concurrent page faults are
 * allowed, if the mmap_sem is held for reading. An example is mincore
 * vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64 bits of the pmd atomically. To fix
 * this, all places running pte_offset_map_lock() while holding the
 * mmap_sem in read mode must read the pmd through this function, to
 * know whether the pmd is null or not, and in turn whether they can
 * run pte_offset_map_lock() or pmd_trans_huge() or other pmd
 * operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs. So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic(). We could read it truly
 * atomically here with an atomic64_read() for the THP-enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found stable (i.e. pointing to a pte). And we're returning a none
 * pmdval if the low part of the pmd is none. In some cases the high
 * and low parts of the pmdval returned may not be consistent if THP is
 * enabled (the low part may point to a previously mapped hugepage,
 * while the high part may point to a more recently mapped hugepage),
 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
 * of the pmd to be read atomically to decide if the pmd is unstable
 * or not, with the only exception of when the low part of the pmd is
 * zero, in which case we return a none pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
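
/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * this mirrors how a helper like pmd_none_or_trans_huge_or_clear_bad()
 * is expected to consume pmd_read_atomic() when only the mmap_sem is
 * held for reading:
 *
 *	pmd_t pmdval = pmd_read_atomic(pmdp);
 *	barrier();
 *	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
 *		return 1;	// unstable: don't touch the pte page
 *	// low part points to a pte page: pte_offset_map_lock() is safe
 */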

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
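
/*
 * A conceptual sketch of set_64bit(), which supplies the atomic 64-bit
 * store a 32-bit CPU cannot do with a plain assignment (the real
 * implementation lives in asm/cmpxchg_32.h and uses cmpxchg8b
 * directly; this is only an illustration of the idea):
 *
 *	u64 old;
 *	do {
 *		old = *ptr;
 *	} while (cmpxchg64(ptr, old, value) != old);
 */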

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called are either
	 * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
	 * code or pud_clear_bad()), so we don't need a TLB flush here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
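
/*
 * Why the xchg is enough (illustrative reasoning, not from the original
 * file): once pte_low has been atomically zeroed, the entry is no
 * longer present, so the hardware stops updating it and the high half
 * can be read and cleared with plain stores.  A lockless reader such as
 * gup_get_pte() below rechecks pte_low and discards any pte whose low
 * half changed under it, so a torn high half is never acted upon.
 */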

#ifdef CONFIG_SMP
union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};

static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
union split_pud {
	struct {
		u32 pud_low;
		u32 pud_high;
	};
	pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	union split_pud res, *orig = (union split_pud *)pudp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pud_low = xchg(&orig->pud_low, 0);
	res.pud_high = orig->pud_high;
	orig->pud_high = 0;

	return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and decode a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
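
/*
 * Worked example (illustrative, not part of the original header):
 * a swap entry of type 3 at offset 0x123 packs as
 * val = 3 | (0x123 << 5) = 0x2463 and lives entirely in pte_high, so
 * pte_low (and with it the present bit) stays clear:
 *
 *	swp_entry_t e = __swp_entry(3, 0x123);
 *	// __swp_type(e) == 3, __swp_offset(e) == 0x123
 *	pte_t pte = __swp_entry_to_pte(e);	// pte.pte_high == 0x2463
 */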

#define gup_get_pte gup_get_pte
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking
 * any locks.  For this we would like to load the pointers atomically,
 * but that is not possible (without expensive cmpxchg8b) on PAE.  What
 * we do have is the guarantee that a PTE will only either go from not
 * present to present, or from present to not present, or both -- it
 * will not switch to a completely different present page without a TLB
 * flush in between; something that we are blocking by holding
 * interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *	ptep->pte_high = h;
 *	smp_wmb();
 *	ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *	ptep->pte_low = 0;
 *	smp_wmb();
 *	ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' iff pte_high
 * sees 'h'.  We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high.  *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high.  We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless*
 * it is 'l'.  Because get_user_pages_fast() only operates on present
 * ptes we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}
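
/*
 * Illustrative caller sketch (an assumption about the fast-GUP walker,
 * not code from this file): interrupts are disabled around the walk,
 * which is what blocks the TLB flush mentioned above and makes the
 * retry loop in gup_get_pte() sufficient:
 *
 *	local_irq_save(flags);
 *	pte = gup_get_pte(ptep);
 *	if (pte_present(pte))
 *		...pin the page without taking pagetable locks...
 *	local_irq_restore(flags);
 */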

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */