/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
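 * (unlike plain UC, UC- may still be overridden to a different type,
 * e.g. WC, by the MTRRs; the boot_cpu_data.x86 > 3 check excludes the
 * 386, where the page-level cache-control bits are not usable)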
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) |				\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \
	 : (prot))

/*
 * Macros to add or remove encryption attribute
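 * (__sme_set() ORs in and __sme_clr() masks out sme_me_mask, the AMD
 * SME encryption bit; the mask is zero when memory encryption is
 * inactive, making both no-ops)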
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

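/*
 * RDPKRU and WRPKRU raise #UD unless CR4.PKE is set, which is what
 * X86_FEATURE_OSPKE reflects; without it, behave as if PKRU were 0,
 * i.e. no pkey restrictions.
 */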
static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */
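/*
 * (This is the L1TF mitigation: the PFN of a PROT_NONE entry is stored
 * inverted so that a speculating CPU cannot use it to address valid
 * memory.  protnone_mask() supplies the bits to XOR when such an entry
 * is read back or constructed; see pte_pfn() and pfn_pte() below.)
 */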
static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pfn_pud(pud_pfn(pud),
		       __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user page table and returns the resulting PGD that must
 * be set in the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

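/*
 * An Intel Xeon Phi (KNL) erratum can leave stray Accessed/Dirty bits
 * in not-present entries, so those bits are masked out when testing
 * for "none"; see the comment in pgd_none() below.
 */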
static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

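	/*
	 * A PROT_NONE entry may still be cached in remote TLBs until a
	 * pending flush completes, so it has to be treated as
	 * accessible while the flush is outstanding.
	 */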
	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here. PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
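 *
 * (e.g. with 4-level paging on x86-64, PGDIR_SHIFT == 39 and
 * PTRS_PER_PGD == 512, so this extracts address bits 39..47)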
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
/*
 * a shortcut to get a pgd_t in a given mm
 */
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
# define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}

static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
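 *
 * (Since the allocation is 8k-aligned, bit PAGE_SHIFT, i.e. bit 12 with
 * 4k pages, is clear in the kernel half's address and set in the user
 * half's, so converting between the two is a single bit flip.)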
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}
static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
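/*
 * (On 64-bit, PTRS_PER_PTE == 512 so PTE_SHIFT == 9, and
 * page_level_shift() works out to 12, 21 and 30 for PG_LEVEL_4K,
 * PG_LEVEL_2M and PG_LEVEL_1G respectively.)
 */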
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

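/*
 * PKRU holds two bits per protection key: AD (access disable) in the
 * low bit of each pair and WD (write disable) in the high bit, so key
 * N occupies PKRU bits 2N and 2N+1.
 */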
#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */