// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries do not change the lower 4 bits. This does
 * not affect any other platform.
 */
#define S390_MASK_BITS	4
#define RANDOM_ORVALUE	GENMASK(BITS_PER_LONG - 1, S390_MASK_BITS)
#define RANDOM_NZVALUE	GENMASK(7, 0)

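/*
 * For illustration, assuming a 64BIT build (BITS_PER_LONG == 64) the
 * masks above evaluate to:
 *
 *	RANDOM_ORVALUE == 0xfffffffffffffff0	(every bit except the low 4)
 *	RANDOM_NZVALUE == 0x00000000000000ff
 *
 * OR-ing RANDOM_ORVALUE into an entry thus sets garbage in all the bits
 * that are safe to touch while leaving the s390 entry type bits alone.
 */
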
static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE basic\n");
	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}

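/*
 * Unlike the basic tests above, which only operate on a transient pte_t
 * value, the advanced tests install entries into a live page table and
 * exercise the ptep_xxx() accessors that the core MM actually uses:
 * wrprotect, get_and_clear, set_access_flags and test_and_clear_young.
 */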
static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));

	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));

	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}

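/*
 * Saved write is a pte/pmd state that, on architectures implementing it
 * (e.g. ppc64 for NUMA balancing), remembers the original write
 * permission while an entry is temporarily made PROT_NONE. The tests
 * below only check that the mk/clear helpers round trip as expected.
 */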
static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic\n");
	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	pr_debug("Validating PMD leaf\n");
	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

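/*
 * Note the split between the two huge helpers: pmd_leaf() above covers
 * the THP case, while pmd_set_huge()/pmd_clear_huge() below belong to
 * the CONFIG_HAVE_ARCH_HUGE_VMAP path used for huge kernel mappings
 * (e.g. vmalloc/ioremap), which is why this test is gated on that
 * config option rather than on THP.
 */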
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	pr_debug("Validating PMD saved write\n");
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
				   pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic\n");
	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;

	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	pr_debug("Validating PUD leaf\n");
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
				   pgprot_t prot) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
				   pgprot_t prot) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

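/*
 * The clear/populate tests below follow a common pattern: load an entry
 * with garbage (RANDOM_ORVALUE), verify that pxx_clear() leaves it
 * pxx_none(), then point it at a real lower level table and verify that
 * pxx_populate() does not qualify as pxx_bad(). The runtime
 * mm_pxx_folded() checks skip levels that are folded away on the
 * running kernel (e.g. with dynamic page table folding on s390).
 */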
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pmd_clear(pmdp);
	pud_clear(pudp);
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long vaddr)
{
	pte_t pte = ptep_get(ptep);

	pr_debug("Validating PTE clear\n");
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_clear(pmdp);
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PMD protnone\n");
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	pr_debug("Validating PMD devmap\n");
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	pr_debug("Validating PUD devmap\n");
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

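/*
 * Soft dirty bits track pages written since the bit was last cleared,
 * as used by checkpoint/restore style dirty tracking through
 * /proc/<pid>/pagemap. The swap variants check the same state on
 * swapped out (non-present) entries.
 */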
static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PMD soft dirty\n");
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

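/*
 * A pte/pmd must survive a round trip through the arch swap entry
 * format without losing its pfn, since that is how the MM stores and
 * later reconstructs swapped out and migrated entries.
 */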
static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects the given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_migration_entry(page, 1);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_write_migration_entry(swp));

	make_migration_entry_read(&swp);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));

	swp = make_migration_entry(page, 0);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}

static void __init hugetlb_advanced_tests(struct mm_struct *mm,
					  struct vm_area_struct *vma,
					  pte_t *ptep, unsigned long pfn,
					  unsigned long vaddr, pgprot_t prot)
{
	struct page *page = pfn_to_page(pfn);
	pte_t pte = ptep_get(ptep);
	unsigned long paddr = __pfn_to_phys(pfn) & PMD_MASK;

	pr_debug("Validating HugeTLB advanced\n");
	pte = pte_mkhuge(mk_pte(pfn_to_page(PHYS_PFN(paddr)), prot));
	set_huge_pte_at(mm, vaddr, ptep, pte);

	WARN_ON(!pte_same(pte, huge_ptep_get(ptep)));
	huge_pte_clear(mm, vaddr, ptep, PMD_SIZE);
	pte = huge_ptep_get(ptep);
	WARN_ON(!huge_pte_none(pte));

	pte = mk_huge_pte(page, prot);
	set_huge_pte_at(mm, vaddr, ptep, pte);

	huge_ptep_set_wrprotect(mm, vaddr, ptep);
	pte = huge_ptep_get(ptep);
	WARN_ON(huge_pte_write(pte));

	pte = mk_huge_pte(page, prot);
	set_huge_pte_at(mm, vaddr, ptep, pte);

	huge_ptep_get_and_clear(mm, vaddr, ptep);
	pte = huge_ptep_get(ptep);
	WARN_ON(!huge_pte_none(pte));

	pte = mk_huge_pte(page, prot);
	pte = huge_pte_wrprotect(pte);
	set_huge_pte_at(mm, vaddr, ptep, pte);

	pte = huge_pte_mkwrite(pte);
	pte = huge_pte_mkdirty(pte);
	huge_ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = huge_ptep_get(ptep);
	WARN_ON(!(huge_pte_write(pte) && huge_pte_dirty(pte)));
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init hugetlb_advanced_tests(struct mm_struct *mm,
					  struct vm_area_struct *vma,
					  pte_t *ptep, unsigned long pfn,
					  unsigned long vaddr, pgprot_t prot)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return positive after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

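/*
 * Pick a random address in the user virtual address range. The test
 * page table entries are instantiated and exercised at this address
 * within the dedicated test mm allocated below.
 */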
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid large memory block allocations to be used for mapping
	 * at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));
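
	/*
	 * For example, assuming 4K pages and 2M PMDs, a physical address
	 * of 0x40a01234 gives pte_aligned == 0x40a01 and pmd_aligned ==
	 * 0x40a00 above; each higher level just masks off more of the
	 * low bits of the real pfn.
	 */
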
	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	ptep = pte_alloc_map_lock(mm, pmdp, vaddr, &ptl);

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);
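
	/*
	 * The tests run roughly bottom up: standalone helper checks first
	 * (basic), then tests against the live table (clear, advanced),
	 * followed by the feature specific helpers. PTE level tests that
	 * need the mapped and locked pte run before pte_unmap_unlock().
	 */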
	pte_basic_tests(pte_aligned, prot);
	pmd_basic_tests(pmd_aligned, prot);
	pud_basic_tests(mm, pud_aligned, prot);
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pte_clear_tests(mm, ptep, vaddr);
	pmd_clear_tests(mm, pmdp);
	pud_clear_tests(mm, pudp);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);

	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	hugetlb_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pud_huge_tests(pudp, pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, prot);
	pmd_savedwrite_tests(pmd_aligned, prot);

	pte_unmap_unlock(ptep, ptl);

	pmd_populate_tests(mm, pmdp, saved_ptep);
	pud_populate_tests(mm, pudp, saved_pmdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();
	hugetlb_basic_tests(pte_aligned, prot);

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);