// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the
 * semantics expected of the helpers being validated here. All future
 * changes made here, or in the documentation, need to be kept in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry type. But these bits might affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries do not change
 * the lower 4 bits. This does not affect any other platform. Also
 * avoid bit 62 on ppc64, which is used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
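/*
 * An illustrative sketch of the masks above, assuming BITS_PER_LONG == 64:
 *
 *	ARCH_SKIP_MASK = 0x400000000000000f	(bits 0-3 and bit 62)
 *	RANDOM_ORVALUE = 0xbffffffffffffff0	(all the other bits set)
 *
 * so the garbage pattern ORed into entries never touches the bits that
 * s390 or ppc64 reserve for their own bookkeeping.
 */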
#define RANDOM_NZVALUE	GENMASK(7, 0)

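/*
 * The pxx_basic_tests() below all follow the same round-trip pattern:
 * apply a setter after its clearing counterpart and check that the
 * attribute reads back as set, then apply the clearing helper after the
 * setter and check that it reads back as clear, e.g.
 *
 *	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
 *	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
 *
 * This exercises both directions of each helper pair on a transient
 * entry without touching any live page table.
 */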
static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE basic\n");
	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}

static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));

	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));

	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}

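/*
 * Saved-write is used by automatic NUMA balancing (mostly relevant on
 * ppc64): when an entry is made PROT_NONE for a NUMA hinting fault, its
 * original writability is stashed in a separate "saved write" bit so it
 * can be restored once the fault has been handled. The tests below only
 * check that the set/clear helper pair round-trips.
 */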
static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic\n");
	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

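/*
 * Unlike the basic tests above, which only manipulate transient entry
 * values, the advanced tests install entries into a live page table via
 * set_pmd_at() and then exercise the pmdp_*() helpers against it, so
 * they also depend on a valid mm, vma and a mapped address range.
 */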
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	pr_debug("Validating PMD leaf\n");
	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

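/*
 * pmd_set_huge()/pmd_clear_huge() (and the PUD counterparts further
 * down) are the helpers the huge vmap/ioremap paths use to install and
 * tear down huge leaf entries in the kernel page table, which is why
 * these tests are gated on both CONFIG_HAVE_ARCH_HUGE_VMAP and the
 * architecture reporting ioremap support at the given level.
 */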
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_ioremap_pmd_supported())
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PMD saved write\n");
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic\n");
	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;

	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	pr_debug("Validating PUD leaf\n");
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_ioremap_pud_supported())
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* !CONFIG_HAVE_ARCH_HUGE_VMAP */

399145f9
AK
358#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
359static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
a5c3b9ff
AK
360static void __init pud_advanced_tests(struct mm_struct *mm,
361 struct vm_area_struct *vma, pud_t *pudp,
362 unsigned long pfn, unsigned long vaddr,
363 pgprot_t prot)
364{
365}
366static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
367static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
368{
369}
399145f9
AK
370#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
371#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
372static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
373static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
a5c3b9ff
AK
374static void __init pmd_advanced_tests(struct mm_struct *mm,
375 struct vm_area_struct *vma, pmd_t *pmdp,
376 unsigned long pfn, unsigned long vaddr,
377 pgprot_t prot)
378{
379}
380static void __init pud_advanced_tests(struct mm_struct *mm,
381 struct vm_area_struct *vma, pud_t *pudp,
382 unsigned long pfn, unsigned long vaddr,
383 pgprot_t prot)
384{
385}
386static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
387static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
388static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
389{
390}
391static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
392{
393}
394static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
399145f9
AK
395#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
396
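/*
 * P4D and PGD entries are filled with a random non-zero byte pattern
 * (RANDOM_NZVALUE) rather than built from a pfn, since at these levels
 * only pxx_same() is being exercised and no valid mapping is required.
 */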
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pmd_clear(pmdp);
	pud_clear(pudp);
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long vaddr)
{
	pte_t pte = ptep_get(ptep);

	pr_debug("Validating PTE clear\n");
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_clear(pmdp);
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

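/*
 * A protnone entry is present (it still caches a valid pfn translation)
 * but refuses all user access; automatic NUMA balancing relies on this
 * combination to trigger hinting faults. Hence both checks below must
 * hold at the same time for an entry created with __P000.
 */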
static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PMD protnone\n");
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

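/*
 * pxx_mkdevmap() marks an entry as mapping ZONE_DEVICE memory (e.g.
 * pmem exposed via device DAX), which lets page table walkers such as
 * GUP distinguish those mappings from regular ones. The tests only
 * verify that the flag set by pxx_mkdevmap() reads back via
 * pxx_devmap().
 */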
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	pr_debug("Validating PMD devmap\n");
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	pr_debug("Validating PUD devmap\n");
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

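/*
 * The soft-dirty bit tracks whether a page has been written since its
 * tracking state was last reset (checkpoint/restore uses this via
 * CONFIG_MEM_SOFT_DIRTY). Present entries and swap entries carry the
 * bit in different positions, which is why the pte_*() and pte_swp_*()
 * variants are tested separately.
 */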
static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PMD soft dirty\n");
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

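/*
 * __pte_to_swp_entry() and __swp_entry_to_pte() convert between the
 * arch specific swap entry layout inside a pte and the generic
 * swp_entry_t. The tests below validate that converting an entry to a
 * swp_entry_t and back is lossless, i.e. the original pfn bits survive
 * the round trip.
 */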
static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects the given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_migration_entry(page, 1);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_write_migration_entry(swp));

	make_migration_entry_read(&swp);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));

	swp = make_migration_entry(page, 0);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

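/*
 * HugeTLB uses its own huge_pte_*() helper family, which on most
 * architectures wraps the regular pte helpers but may differ where huge
 * and regular ptes have different formats. The basic tests mirror the
 * pte_basic_tests() round trips using mk_huge_pte().
 */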
#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}

static void __init hugetlb_advanced_tests(struct mm_struct *mm,
					  struct vm_area_struct *vma,
					  pte_t *ptep, unsigned long pfn,
					  unsigned long vaddr, pgprot_t prot)
{
	struct page *page = pfn_to_page(pfn);
	pte_t pte = ptep_get(ptep);
	unsigned long paddr = __pfn_to_phys(pfn) & PMD_MASK;

	pr_debug("Validating HugeTLB advanced\n");
	pte = pte_mkhuge(mk_pte(pfn_to_page(PHYS_PFN(paddr)), prot));
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	WARN_ON(!pte_same(pte, huge_ptep_get(ptep)));
	huge_pte_clear(mm, vaddr, ptep, PMD_SIZE);
	pte = huge_ptep_get(ptep);
	WARN_ON(!huge_pte_none(pte));

	pte = mk_huge_pte(page, prot);
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	huge_ptep_set_wrprotect(mm, vaddr, ptep);
	pte = huge_ptep_get(ptep);
	WARN_ON(huge_pte_write(pte));

	pte = mk_huge_pte(page, prot);
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	huge_ptep_get_and_clear(mm, vaddr, ptep);
	pte = huge_ptep_get(ptep);
	WARN_ON(!huge_pte_none(pte));

	pte = mk_huge_pte(page, prot);
	pte = huge_pte_wrprotect(pte);
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte = huge_pte_mkwrite(pte);
	pte = huge_pte_mkdirty(pte);
	huge_ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = huge_ptep_get(ptep);
	WARN_ON(!(huge_pte_write(pte) && huge_pte_dirty(pte)));
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init hugetlb_advanced_tests(struct mm_struct *mm,
					  struct vm_area_struct *vma,
					  pte_t *ptep, unsigned long pfn,
					  unsigned long vaddr, pgprot_t prot)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must still return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these
	 * tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

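/*
 * Pick a random, page aligned address in the user address range
 * [FIRST_USER_ADDRESS, TASK_SIZE) so that repeated runs exercise
 * different page table paths rather than always the same slots.
 */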
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) helps create page table entries with
	 * PROT_NONE permission, as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * The PFN for mapping at PTE level is determined from a standard
	 * kernel text symbol. But pfns for higher page table levels are
	 * derived by masking the lower bits of this real pfn. These derived
	 * pfns might not exist on the platform, but that does not really
	 * matter as pfn_pxx() helpers will still create appropriate entries
	 * for the test. This helps avoid large memory block allocations to
	 * be used for mapping at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

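	/*
	 * An illustrative example, assuming 4K pages with 2M PMDs and
	 * 1G PUDs (x86_64 style): for paddr = 0x40912345,
	 *
	 *	pte_aligned = 0x40912345 >> 12                 = 0x40912
	 *	pmd_aligned = (0x40912345 & ~0x1fffff) >> 12   = 0x40800
	 *	pud_aligned = (0x40912345 & ~0x3fffffff) >> 12 = 0x40000
	 *
	 * i.e. each level's pfn is just the real pfn truncated to that
	 * level's mapping boundary.
	 */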
	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	ptep = pte_alloc_map_lock(mm, pmdp, vaddr, &ptl);

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	pte_basic_tests(pte_aligned, prot);
	pmd_basic_tests(pmd_aligned, prot);
	pud_basic_tests(pud_aligned, prot);
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pte_clear_tests(mm, ptep, vaddr);
	pmd_clear_tests(mm, pmdp);
	pud_clear_tests(mm, pudp);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);

	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	hugetlb_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pud_huge_tests(pudp, pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, protnone);
	pmd_savedwrite_tests(pmd_aligned, protnone);

399145f9
AK
1032 pte_unmap_unlock(ptep, ptl);
1033
1034 pmd_populate_tests(mm, pmdp, saved_ptep);
1035 pud_populate_tests(mm, pudp, saved_pmdp);
1036 p4d_populate_tests(mm, p4dp, saved_pudp);
1037 pgd_populate_tests(mm, pgdp, saved_p4dp);
1038
05289402
AK
1039 pte_special_tests(pte_aligned, prot);
1040 pte_protnone_tests(pte_aligned, protnone);
1041 pmd_protnone_tests(pmd_aligned, protnone);
1042
1043 pte_devmap_tests(pte_aligned, prot);
1044 pmd_devmap_tests(pmd_aligned, prot);
1045 pud_devmap_tests(pud_aligned, prot);
1046
1047 pte_soft_dirty_tests(pte_aligned, prot);
1048 pmd_soft_dirty_tests(pmd_aligned, prot);
1049 pte_swap_soft_dirty_tests(pte_aligned, prot);
1050 pmd_swap_soft_dirty_tests(pmd_aligned, prot);
1051
1052 pte_swap_tests(pte_aligned, prot);
1053 pmd_swap_tests(pmd_aligned, prot);
1054
1055 swap_migration_tests();
1056 hugetlb_basic_tests(pte_aligned, prot);
1057
1058 pmd_thp_tests(pmd_aligned, prot);
1059 pud_thp_tests(pud_aligned, prot);
1060
399145f9
AK
1061 p4d_free(mm, saved_p4dp);
1062 pud_free(mm, saved_pudp);
1063 pmd_free(mm, saved_pmdp);
1064 pte_free(mm, saved_ptep);
1065
a5c3b9ff 1066 vm_area_free(vma);
399145f9
AK
1067 mm_dec_nr_puds(mm);
1068 mm_dec_nr_pmds(mm);
1069 mm_dec_nr_ptes(mm);
1070 mmdrop(mm);
1071 return 0;
1072}
1073late_initcall(debug_vm_pgtable);