// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries do not change the lower 4 bits. This does
 * not affect any other platform. Also avoid the 62nd bit on ppc64, which is
 * used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK		(S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE		(GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE		GENMASK(7, 0)

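/*
 * On a 64-bit build, for example, RANDOM_ORVALUE works out to
 * ~(GENMASK(62, 62) | GENMASK(3, 0)): every bit is set except the
 * ppc64 pte marker bit and the s390 entry type bits, so OR-ing it
 * into an entry scribbles on all bits that are safe to touch.
 */
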
static void __init pte_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

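/*
 * Unlike the value-only checks above, the advanced tests below operate
 * on a real, mapped page table entry, so they also exercise the
 * set/clear/flush paths that architectures may override.
 */
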
static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flush.
	 * This requires set_pte_at() to not be used to update an
	 * existing pte entry. Clear the pte before we do set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}

static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

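/*
 * The PMD and PUD suites below mirror the PTE checks at the huge page
 * levels. They are only built when the kernel can actually create such
 * entries, i.e. under CONFIG_TRANSPARENT_HUGEPAGE (and additionally
 * CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD for the PUD level).
 */
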
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(pfn, prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(pfn, prot);

	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

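/*
 * pmd_huge_tests() and pud_huge_tests() below validate the helpers that
 * install and tear down huge kernel mappings (e.g. for vmap/ioremap),
 * which is a separate capability from user space THP, hence the
 * CONFIG_HAVE_ARCH_HUGE_VMAP guard.
 */
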
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(prot))
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(prot))
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else  /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

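/*
 * The clear/populate tests below first scribble RANDOM_ORVALUE into an
 * entry, then check that pxx_clear() really leaves it pxx_none(), and
 * that pointing it at a lower level page table via pxx_populate() does
 * not make it pxx_bad().
 */
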
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long pfn, unsigned long vaddr,
				   pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

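/*
 * Soft dirty bits track pages written since the tracking state was last
 * cleared (see Documentation/admin-guide/mm/soft-dirty.rst). The tests
 * below check both the plain and the swap variants of those helpers.
 */
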
static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

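/*
 * hugetlb_basic_tests() runs the same kind of transformation checks as
 * pte_basic_tests(), but through the huge_pte_*() wrappers used by the
 * hugetlb code, since architectures may encode huge ptes differently.
 */
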
#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return positive after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge page. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

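	/*
	 * The pmd_mkinvalid() checks are skipped when the architecture
	 * provides its own pmdp_invalidate(), presumably because such an
	 * override need not keep pmd_present()/pmd_trans_huge() true for
	 * an entry invalidated via the generic pmd_mkinvalid() alone.
	 */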
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

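/*
 * debug_vm_pgtable() is the entry point of the test, registered as a
 * late initcall below. It builds a scratch mm with one entry at each
 * page table level and then runs all of the helper validations above
 * against those entries.
 */
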
static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;
	int idx;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid large memory block allocations to be used for mapping
	 * at higher page table levels.
	 */
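	/*
	 * For example, with 4K pages and a 2M PMD, a paddr of 0x40212345
	 * yields pte_aligned == 0x40212 but pmd_aligned == 0x40200: the
	 * same frame rounded down to the PMD mapping boundary.
	 */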
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over the protection_map[] to make sure that all
	 * the basic page table transformation validations just hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(pte_aligned, idx);
		pmd_basic_tests(pmd_aligned, idx);
		pud_basic_tests(mm, pud_aligned, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic which do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out from
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, protnone);
	pmd_savedwrite_tests(pmd_aligned, protnone);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	hugetlb_basic_tests(pte_aligned, prot);

	/*
	 * Page table modifying tests. They need to hold
	 * proper page table lock.
	 */

	ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
	pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pte_unmap_unlock(ptep, ptl);

	ptl = pmd_lock(mm, pmdp);
	pmd_clear_tests(mm, pmdp);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pmd_populate_tests(mm, pmdp, saved_ptep);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);