// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: %s: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On s390, the lower 4 bits are used to identify a given page table
 * entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on
 * s390. So while loading up the entries, do not change the lower 4 bits.
 * This does not affect any other platform.
 */
#define S390_MASK_BITS	4
#define RANDOM_ORVALUE	GENMASK(BITS_PER_LONG - 1, S390_MASK_BITS)
#define RANDOM_NZVALUE	GENMASK(7, 0)

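/*
 * Basic tests operate on a locally constructed entry that is never
 * installed in a live page table. Each attribute helper is checked in
 * both directions: setting an attribute on a cleared entry must make
 * the corresponding test helper return true, and clearing it again
 * must make the helper return false.
 */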
static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}

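/*
 * Advanced tests exercise helpers that operate on an entry installed in
 * a real (kernel allocated) page table: install the entry with
 * set_pte_at(), apply the helper under test, read the entry back and
 * check that the expected attribute transition actually took place.
 */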
static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));

	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));

	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}

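/*
 * Savedwrite helpers preserve the write bit while an entry is made
 * PROT_NONE for NUMA balancing, so writability can later be restored
 * without rewalking the VMA. Only some platforms (e.g. ppc64) implement
 * them as anything other than no-ops.
 */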
static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
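/*
 * PMD/PUD level tests only make sense when the kernel can actually map
 * memory with huge page entries, hence each of them bails out early via
 * has_transparent_hugepage() when the running platform lacks THP
 * support.
 */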
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;

	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

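/*
 * pmd_set_huge() and pmd_clear_huge() back huge vmalloc/ioremap style
 * kernel mappings (CONFIG_HAVE_ARCH_HUGE_VMAP), which is why this test
 * writes the entry directly instead of going through set_pmd_at().
 */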
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
		return;
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
#endif /* __PAGETABLE_PMD_FOLDED */
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;

	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
		return;
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

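/*
 * Generic MM provides no pfn based constructors for P4D and PGD level
 * entries, so only pxx_same() can be validated here, on a stack local
 * entry filled with a non-zero garbage pattern.
 */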
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

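/*
 * Clear and populate tests for the upper levels are compiled out when
 * the level is folded at build time, and skipped at runtime via
 * mm_pxx_folded() on platforms (e.g. s390) that fold page table levels
 * dynamically.
 */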
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pmd_clear(pmdp);
	pud_clear(pudp);
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

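/*
 * The clear tests first stuff the entry with RANDOM_ORVALUE garbage
 * (leaving the low S390_MASK_BITS untouched, see above) so that
 * pxx_clear() is shown to wipe a fully populated entry, not just a
 * conveniently empty one.
 */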
static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long vaddr)
{
	pte_t pte = ptep_get(ptep);

	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_clear(pmdp);
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

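/*
 * A protnone entry must look present to generic MM (the page is still
 * resident) while being inaccessible; the caller builds such entries
 * from a PROT_NONE protection value (__P000).
 */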
static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

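/*
 * Soft dirty tracking has both a present flavor (pte_mksoft_dirty())
 * and a swap flavor (pte_swp_mksoft_dirty()) so that the dirty state
 * can survive an entry being converted to a swap entry and back.
 */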
static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

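/*
 * Converting an entry to a swap entry and back must round-trip; in
 * particular the encoded pfn has to survive both conversions intact.
 */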
static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects the given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_migration_entry(page, 1);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_write_migration_entry(swp));

	make_migration_entry_read(&swp);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));

	swp = make_migration_entry(page, 0);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}

static void __init hugetlb_advanced_tests(struct mm_struct *mm,
					  struct vm_area_struct *vma,
					  pte_t *ptep, unsigned long pfn,
					  unsigned long vaddr, pgprot_t prot)
{
	struct page *page = pfn_to_page(pfn);
	unsigned long paddr = __pfn_to_phys(pfn) & PMD_MASK;
	pte_t pte;

	pte = pte_mkhuge(mk_pte(pfn_to_page(PHYS_PFN(paddr)), prot));
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	WARN_ON(!pte_same(pte, huge_ptep_get(ptep)));
	huge_pte_clear(mm, vaddr, ptep, PMD_SIZE);
	pte = huge_ptep_get(ptep);
	WARN_ON(!huge_pte_none(pte));

	pte = mk_huge_pte(page, prot);
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	huge_ptep_set_wrprotect(mm, vaddr, ptep);
	pte = huge_ptep_get(ptep);
	WARN_ON(huge_pte_write(pte));

	pte = mk_huge_pte(page, prot);
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	huge_ptep_get_and_clear(mm, vaddr, ptep);
	pte = huge_ptep_get(ptep);
	WARN_ON(!huge_pte_none(pte));

	pte = mk_huge_pte(page, prot);
	pte = huge_pte_wrprotect(pte);
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte = huge_pte_mkwrite(pte);
	pte = huge_pte_mkdirty(pte);
	huge_ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = huge_ptep_get(ptep);
	WARN_ON(!(huge_pte_write(pte) && huge_pte_dirty(pte)));
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init hugetlb_advanced_tests(struct mm_struct *mm,
					  struct vm_area_struct *vma,
					  pte_t *ptep, unsigned long pfn,
					  unsigned long vaddr, pgprot_t prot)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	/*
	 * pmd_trans_huge() and pmd_present() must return true even after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP, to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these
	 * tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

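/*
 * Pick a random, page aligned address within the user virtual address
 * span so that repeated runs exercise different page table slots.
 */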
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

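/*
 * Test sequencing: the basic tests run on local entry values only, the
 * clear and advanced tests run against a real page table with the PTE
 * lock held, and the populate tests run after the lock is dropped,
 * before the saved page table pages are finally freed.
 */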
static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		mmdrop(mm);
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid large memory block allocations to be used for mapping
	 * at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	ptep = pte_alloc_map_lock(mm, pmdp, vaddr, &ptl);

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	pte_basic_tests(pte_aligned, prot);
	pmd_basic_tests(pmd_aligned, prot);
	pud_basic_tests(pud_aligned, prot);
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pte_clear_tests(mm, ptep, vaddr);
	pmd_clear_tests(mm, pmdp);
	pud_clear_tests(mm, pudp);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);

	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	hugetlb_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pud_huge_tests(pudp, pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, prot);
	pmd_savedwrite_tests(pmd_aligned, prot);

	pte_unmap_unlock(ptep, ptl);

	pmd_populate_tests(mm, pmdp, saved_ptep);
	pud_populate_tests(mm, pudp, saved_pmdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();
	hugetlb_basic_tests(pte_aligned, prot);

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);