/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

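/*
 * Illustrative use (a sketch, not part of this header): a driver mmap
 * handler that wants an uncached mapping of MMIO pages would typically
 * do
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 *
 * On 386-class CPUs the cache-control PTE bits are not usable, hence
 * the boot_cpu_data.x86 > 3 check that leaves the protection untouched.
 */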
/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))

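/*
 * Sketch (assumes SME is active, so sme_me_mask carries the C-bit):
 * __sme_set() ORs sme_me_mask into the protection and __sme_clr() masks
 * it out.  E.g. a buffer that must be visible unencrypted to a device
 * could be mapped with
 *
 *	prot = pgprot_decrypted(prot);
 *
 * With SME inactive, sme_me_mask is 0 and both macros are no-ops.
 */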
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()	do { } while (0)
#endif

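/*
 * With CONFIG_DEBUG_WX=y, debug_checkwx() walks the kernel page tables
 * once at the end of boot (from mark_rodata_ro()) and warns about any
 * mapping left both writable and executable; otherwise it compiles away
 * entirely.
 */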
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

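/*
 * E.g. a read fault on an untouched anonymous mapping is satisfied by
 * mapping ZERO_PAGE() write-protected; a real page is only allocated
 * when the task first writes to the address.
 */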
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while (0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

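/*
 * Note the _PAGE_DEVMAP exclusion above: a DAX huge pmd carries both
 * _PAGE_PSE and _PAGE_DEVMAP, and deliberately reports
 * pmd_trans_huge() == false so the THP paths leave it to the devmap
 * handlers below.
 */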
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

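/*
 * Example (illustrative): on a CPU without NX support,
 * __supported_pte_mask has _PAGE_NX clear, so a present protection that
 * requests NX gets it silently stripped here.  A non-present value is
 * returned untouched because its bits may encode something else
 * entirely, e.g. a swap entry.
 */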
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

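/*
 * Illustrative sketch: protection changes preserve the state bits in
 * _PAGE_CHG_MASK (PFN, dirty, accessed, ...).  E.g. write-protecting a
 * dirty PTE via
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * keeps _PAGE_DIRTY and _PAGE_ACCESSED while the RW/NX bits come from
 * the new protection.
 */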
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

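/*
 * A PROT_NONE PTE counts as "accessible" while a deferred TLB flush is
 * pending: other CPUs may still hold the old, accessible entry in their
 * TLBs, so callers deciding whether a flush is needed must treat the
 * PTE as live until the pending flush completes.
 */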
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
/*
 * a shortcut to get a pgd_t in a given mm
 */
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

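/*
 * Illustrative sketch (a hypothetical helper, not part of this header):
 * resolving a mapped kernel virtual address to its PTE with the
 * accessors above, checking each level before descending:
 *
 *	static pte_t *lookup_kernel_pte(unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset_k(addr);
 *		p4d_t *p4d;
 *		pud_t *pud;
 *		pmd_t *pmd;
 *
 *		if (pgd_none(*pgd) || pgd_bad(*pgd))
 *			return NULL;
 *		p4d = p4d_offset(pgd, addr);
 *		if (p4d_none(*p4d))
 *			return NULL;
 *		pud = pud_offset(p4d, addr);
 *		if (pud_none(*pud) || pud_large(*pud))
 *			return NULL;
 *		pmd = pmd_offset(pud, addr);
 *		if (pmd_none(*pmd) || pmd_large(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 *
 * Huge mappings terminate the walk early at the pud/pmd level; the
 * kernel's real helper for this pattern is lookup_address().
 */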
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

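/*
 * The "full" path is the whole-address-space teardown case: e.g.
 * zap_pte_range() passes tlb->fullmm here, so an exiting process can
 * clear its PTEs with plain local stores, while munmap() on a live mm
 * still takes the atomic get-and-clear path.
 */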
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}

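/*
 * Illustrative use (a sketch of what the pgd constructor does): a new
 * mm shares the kernel half of the address space by copying the kernel
 * entries from the reference page tables:
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 *
 * With PTI enabled, the second memcpy above transparently mirrors the
 * copied range into the user-visible pgd page as well.
 */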
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

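/*
 * Worked example (x86-64, where PAGE_SHIFT = 12 and PTE_SHIFT =
 * ilog2(512) = 9): for PG_LEVEL_2M, which is level 2,
 *
 *	page_level_shift() = (12 - 9) + 2 * 9 = 21
 *	page_level_size()  = 1UL << 21       = 2 MiB
 *	page_level_mask()  = ~(2 MiB - 1)    = ~0x1fffffUL
 */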
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

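/*
 * Worked example: each pkey owns two adjacent PKRU bits, so for pkey 1
 * the AD bit is bit 2 and the WD bit is bit 3.  A PKRU value of 0x8
 * (only WD set for key 1) therefore gives
 *
 *	__pkru_allows_read(0x8, 1)  == true
 *	__pkru_allows_write(0x8, 1) == false
 */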
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */