/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot) \
        ((boot_cpu_data.x86 > 3) \
         ? (__pgprot(pgprot_val(prot) | \
                     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \
         : (prot))

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot) __pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot) __pgprot(__sme_clr(pgprot_val(prot)))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
        __visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte) \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d) native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d) native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd) native_pmd_clear(pmd)

#define pgd_val(x) native_pgd_val(x)
#define __pgd(x) native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x) native_p4d_val(x)
#define __p4d(x) native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x) native_pud_val(x)
#define __pud(x) native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x) native_pmd_val(x)
#define __pmd(x) native_make_pmd(x)
#endif

#define pte_val(x) native_pte_val(x)
#define __pte(x) native_make_pte(x)

#define arch_end_context_switch(prev) do {} while(0)

#endif  /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

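/*
 * PKRU is only usable once the kernel has enabled protection keys
 * (CR4.PKE, reported as X86_FEATURE_OSPKE).  Without it, reads report
 * "no restrictions" and writes are silently dropped.
 */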
static inline u32 read_pkru(void)
{
        if (boot_cpu_has(X86_FEATURE_OSPKE))
                return __read_pkru();
        return 0;
}

static inline void write_pkru(u32 pkru)
{
        if (boot_cpu_has(X86_FEATURE_OSPKE))
                __write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
        return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
        return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

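/*
 * A PROT_NONE entry is not present to the hardware but still carries a
 * page frame number.  To keep the CPU from speculatively dereferencing
 * that frame (L1TF), the PFN bits of such entries are stored inverted.
 * protnone_mask() is non-zero only for inverted entries, so the XOR in
 * the pfn accessors below is a no-op for ordinary present entries; the
 * helper itself is defined by the headers included further down.
 */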
static inline unsigned long pte_pfn(pte_t pte)
{
        unsigned long pfn = pte_val(pte);
        pfn ^= protnone_mask(pfn);
        return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        unsigned long pfn = pmd_val(pmd);
        pfn ^= protnone_mask(pfn);
        return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
        unsigned long pfn = pud_val(pud);
        pfn ^= protnone_mask(pfn);
        return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
        return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
        return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
        /* No 512 GiB pages yet */
        return 0;
}

#define pte_page(pte) pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
        return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
        return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
        return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
        return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
        return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

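/*
 * pte_set_flags()/pte_clear_flags() operate on the raw native pte
 * value; the pte_mk*()/pte_clr*() accessors below are thin wrappers
 * around them (likewise for the pmd and pud variants further down).
 */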
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
        pmdval_t v = native_pmd_val(pmd);

        return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
        pmdval_t v = native_pmd_val(pmd);

        return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
        pudval_t v = native_pud_val(pud);

        return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
        pudval_t v = native_pud_val(pud);

        return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
        return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
        return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
        return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
        return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
        return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
        return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
        return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
        return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
        return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
        return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
        return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
        return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
        pgprotval_t massaged_val = massage_pgprot(pgprot);

        /* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
        WARN_ONCE(pgprot_val(pgprot) != massaged_val,
                  "attempted to set unsupported pgprot: %016llx "
                  "bits: %016llx supported: %016llx\n",
                  (u64)pgprot_val(pgprot),
                  (u64)pgprot_val(pgprot) ^ massaged_val,
                  (u64)__supported_pte_mask);
#endif

        return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        phys_addr_t pfn = page_nr << PAGE_SHIFT;
        pfn ^= protnone_mask(pgprot_val(pgprot));
        pfn &= PTE_PFN_MASK;
        return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        phys_addr_t pfn = page_nr << PAGE_SHIFT;
        pfn ^= protnone_mask(pgprot_val(pgprot));
        pfn &= PHYSICAL_PMD_PAGE_MASK;
        return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
        phys_addr_t pfn = page_nr << PAGE_SHIFT;
        pfn ^= protnone_mask(pgprot_val(pgprot));
        pfn &= PHYSICAL_PUD_PAGE_MASK;
        return __pud(pfn | check_pgprot(pgprot));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

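/*
 * pte_modify()/pmd_modify() can turn a present entry into PROT_NONE and
 * vice versa.  flip_protnone_guard() re-inverts the PFN bits (under the
 * given mask) whenever the change flips the entry's PROT_NONE status,
 * keeping the stored value consistent with the inversion scheme above.
 */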
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte), oldval = val;

        /*
         * Keep the bits covered by _PAGE_CHG_MASK (the PFN plus the
         * cache/accessed/dirty type bits) and take everything else,
         * including NX, from the new protection:
         */
        val &= _PAGE_CHG_MASK;
        val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
        val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
        return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmdval_t val = pmd_val(pmd), oldval = val;

        val &= _HPAGE_CHG_MASK;
        val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
        val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
        return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
        return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         enum page_cache_mode pcm,
                                         enum page_cache_mode new_pcm)
{
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
         */
        if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         * - request is write-through, return cannot be write-back
         * - request is write-through, return cannot be write-combine
         */
        if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WC &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WT &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WT &&
             new_pcm == _PAGE_CACHE_MODE_WC)) {
                return 0;
        }

        return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

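/*
 * The *_none() checks below mask off _PAGE_KNL_ERRATUM_MASK: Knights
 * Landing parts can spuriously set the Accessed/Dirty bits even in
 * entries that were cleared, so those bits must not make an otherwise
 * empty entry look populated (see the comment in pgd_none() below).
 */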
static inline int pte_none(pte_t pte)
{
        return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
        return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

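/*
 * pte_accessible() means the pte may still be cached in some TLB: it is
 * either present, or a PROT_NONE entry whose invalidation has not been
 * flushed yet (e.g. an mprotect() or NUMA-hinting change with the TLB
 * flush still pending).
 */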
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
        if (pte_flags(a) & _PAGE_PRESENT)
                return true;

        if ((pte_flags(a) & _PAGE_PROTNONE) &&
                        mm_tlb_flush_pending(mm))
                return true;

        return false;
}

static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_PSE is needed too because
         * split_huge_page will temporarily clear the present bit (but
         * the _PAGE_PSE flag will remain set at all times while the
         * _PAGE_PRESENT bit is clear).
         */
        return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
        return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
                == _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
                == _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with upper half. */
        unsigned long val = native_pmd_val(pmd);
        return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud) pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif /* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
        return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
        return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
        return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
        return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
        unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

        if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
                ignore_flags |= _PAGE_NX;

        return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
        return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
        return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        unsigned long ignore_flags = _PAGE_USER;

        if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
                ignore_flags |= _PAGE_NX;

        return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        /*
         * There is no need to do a workaround for the KNL stray
         * A/D bit erratum here.  PGDs only point to page tables
         * except on 32-bit non-PAE which is not supported on
         * KNL.
         */
        return !native_pgd_val(pgd);
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */

#endif /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
/*
 * a shortcut to get a pgd_t in a given mm
 */
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
        /* Default trampoline pgd value */
        trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
# define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;

        native_pmd_clear(pmdp);
        return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
        pud_t res = *pudp;

        native_pud_clear(pudp);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
{
        native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
                              pud_t *pudp, pud_t pud)
{
        native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

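/*
 * Write-protecting clears the RW bit with an atomic clear_bit() rather
 * than a plain read-modify-write, so a concurrent hardware update of
 * the Accessed/Dirty bits in the same entry cannot be lost.
 */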
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pud_t *pudp,
                                 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);


#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                            pmd_t *pmdp)
{
        return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pud_t *pudp)
{
        return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
        return pud_flags(pud) & _PAGE_RW;
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        if (!static_cpu_has(X86_FEATURE_PTI))
                return;
        /* Clone the user space pgd as well */
        memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
               count * sizeof(pgd_t));
#endif
}

DH
1174#define PTE_SHIFT ilog2(PTRS_PER_PTE)
1175static inline int page_level_shift(enum pg_level level)
1176{
1177 return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
1178}
1179static inline unsigned long page_level_size(enum pg_level level)
1180{
1181 return 1UL << page_level_shift(level);
1182}
1183static inline unsigned long page_level_mask(enum pg_level level)
1184{
1185 return ~(page_level_size(level) - 1);
1186}
85958b46 1187
602e0186
KS
1188/*
1189 * The x86 doesn't have any external MMU info: the kernel page
1190 * tables contain all the necessary information.
1191 */
1192static inline void update_mmu_cache(struct vm_area_struct *vma,
1193 unsigned long addr, pte_t *ptep)
1194{
1195}
1196static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
1197 unsigned long addr, pmd_t *pmd)
1198{
1199}
a00cc7d9
MW
1200static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
1201 unsigned long addr, pud_t *pud)
1202{
1203}
85958b46 1204
2bf01f9f 1205#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
fa0f281c
CG
1206static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1207{
fa0f281c
CG
1208 return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1209}
1210
1211static inline int pte_swp_soft_dirty(pte_t pte)
1212{
fa0f281c
CG
1213 return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
1214}
1215
1216static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1217{
fa0f281c
CG
1218 return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1219}
ab6e3d09
NH
1220
1221#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1222static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1223{
1224 return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1225}
1226
1227static inline int pmd_swp_soft_dirty(pmd_t pmd)
1228{
1229 return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
1230}
1231
1232static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1233{
1234 return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1235}
1236#endif
2bf01f9f 1237#endif
fa0f281c 1238
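/*
 * PKRU describes each of the 16 protection keys with two bits: an
 * Access-Disable bit and a Write-Disable bit, so key N occupies bits
 * 2N and 2N+1 of the register.
 */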
#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
        int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
        return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
        int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
        /*
         * Access-disable disables writes too so we need to check
         * both bits here.
         */
        return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        /* ifdef to avoid doing 59-bit shift on 32-bit values */
        return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
        return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
        u32 pkru = read_pkru();

        if (!__pkru_allows_read(pkru, pkey))
                return false;
        if (write && !__pkru_allows_write(pkru, pkey))
                return false;

        return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
        unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

        if (write)
                need_pte_bits |= _PAGE_RW;

        if ((pteval & need_pte_bits) != need_pte_bits)
                return 0;

        return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
        return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
        return __pte_access_permitted(pud_val(pud), write);
}

#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */