/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
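
/*
 * Illustrative note (not part of the original header): with SME active,
 * __sme_set()/__sme_clr() OR in or mask off the encryption C-bit
 * (sme_me_mask), so a buffer that must be shared unencrypted with a
 * device could, for example, be mapped with pgprot_decrypted(PAGE_KERNEL).
 * When SME is not active, sme_me_mask is 0 and both macros are no-ops.
 */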
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}


static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
	      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pfn_pud(pud_pfn(pud),
	      __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
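
/*
 * Worked example (illustrative, not part of the original header): a request
 * for uncached-minus (pcm == _PAGE_CACHE_MODE_UC_MINUS) where the existing
 * tracking would return write-back (new_pcm == _PAGE_CACHE_MODE_WB) hits the
 * first clause above and is_new_memtype_allowed() returns 0; if both request
 * and return are _PAGE_CACHE_MODE_WB, none of the clauses match and it
 * returns 1.
 */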

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user and returns the resulting PGD that must be set in
 * the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */
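
/*
 * Illustrative sketch (an assumption, not taken from this header): the
 * low-level PGD write helpers are expected to route new values through
 * pti_set_user_pgtbl() so the user-visible copy of the page tables stays
 * in sync, roughly:
 *
 *	static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 *	{
 *		WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 *	}
 */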

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here. PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif  /* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
/*
 * a shortcut to get a pgd_t in a given mm
 */
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
# define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		*pmdp = pmd;
		return old;
	}
}
#endif
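
/*
 * Illustrative use (a sketch, not part of the original header): generic THP
 * code such as pmdp_invalidate() can use pmdp_establish() to atomically
 * replace a pmd while keeping the old value, so dirty/accessed bits are not
 * lost, roughly:
 *
 *	pmd_t old = pmdp_establish(vma, address, pmdp,
 *				   pmd_mknotpresent(*pmdp));
 *	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 */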

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}

static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}
static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
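
/*
 * Worked example (illustrative, not part of the original header): with the
 * kernel PGD page at, say, 0xffff888001c0e000, PTI_PGTABLE_SWITCH_BIT is
 * PAGE_SHIFT (12), so kernel_to_user_pgdp() returns the address with bit 12
 * set, 0xffff888001c0f000 -- the user half of the order-1 allocation --
 * and user_to_kernel_pgdp() clears that bit again.
 */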

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}
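
/*
 * Illustrative use (a sketch, not part of the original header): when a new
 * pgd is set up, the kernel half can be copied from the reference page
 * tables with something along the lines of:
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */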

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
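
/*
 * Worked example (illustrative, not part of the original header): with
 * 4 KiB pages, PTE_SHIFT is ilog2(512) == 9, so page_level_shift(PG_LEVEL_2M)
 * is (12 - 9) + 2 * 9 == 21, page_level_size() is 2 MiB and
 * page_level_mask() is ~(2 MiB - 1).
 */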

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
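
/*
 * Worked example (illustrative, not part of the original header): each pkey
 * owns two PKRU bits, so pkey 1 uses bits 2 (AD) and 3 (WD).  With
 * pkru == 0x8 only the write-disable bit for pkey 1 is set:
 * __pkru_allows_read(0x8, 1) is true while __pkru_allows_write(0x8, 1)
 * is false.
 */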

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */