// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries do not change the lower 4 bits. This does
 * not affect any other platform. Also avoid bit 62 on ppc64, which is used
 * to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK		(S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE		(GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE		GENMASK(7, 0)
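
/*
 * A worked example, assuming a 64-bit platform: GENMASK(63, 0) is all
 * ones, so RANDOM_ORVALUE works out to 0xbffffffffffffff0 - every bit
 * set except bit 62 (the ppc64 pte marker) and bits 3..0 (the s390
 * entry type field). ORing this into an entry therefore scrambles it
 * as much as possible while keeping pxx_clear() and friends working.
 */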
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};
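/*
 * A short note on the two pfn groups above, summarizing how init_args()
 * sets them up: the fixed_pxx_pfn values are derived from a kernel text
 * symbol and back entries that are only inspected, never dereferenced,
 * while pud_pfn/pmd_pfn/pte_pfn refer to pages actually allocated at
 * init time so that tests which touch page contents (for example
 * swap_migration_tests()) stay safe. ULONG_MAX marks a pfn whose
 * backing (huge) page could not be allocated.
 */
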
static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}
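/*
 * The advanced tests below modify live page table entries through
 * helpers such as ptep_set_wrprotect() and ptep_get_and_clear(), so
 * the caller (debug_vm_pgtable()) maps and locks args->ptep around
 * them with pte_offset_map_lock().
 */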
static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
	 * This requires that set_pte_at() is not used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 */
	if (args->pte_pfn == ULONG_MAX)
		return;

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));
}
static void __init pte_savedwrite_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}
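/*
 * Context for the savedwrite tests above: with NUMA balancing, an
 * entry can be made PROT_NONE to trigger hinting faults while its
 * original write permission is "saved" so it can be restored later;
 * ppc64 is the primary user of these helpers. Hence the tests build
 * the entry from page_prot_none.
 */
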
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}
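/*
 * pmd_advanced_tests() below pairs pgtable_trans_huge_deposit() with
 * pgtable_trans_huge_withdraw(): a THP pmd must always have a
 * preallocated pte page deposited against it so that a later split
 * can repopulate the lower level without allocating memory.
 */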
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited pgtable */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}
static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}
static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}
static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(prot))
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(prot))
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
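/*
 * The P4D and PGD tests below only check pxx_same() on a garbage
 * value: unlike the lower levels, there are no generic pfn_p4d() or
 * pfn_pgd() helpers to build real entries from a pfn and protection
 * value, so memset() with RANDOM_NZVALUE is used instead.
 */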
static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* PAGETABLE_PUD_FOLDED */
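/*
 * A note on the folded-level checks used here and below: when a page
 * table level is folded, its entries alias those of the level above,
 * so the clear/populate tests are compiled out (or bail out through
 * mm_pxx_folded()) rather than scribble on an entry that the test
 * does not really own.
 */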
#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* PAGETABLE_P4D_FOLDED */
static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	if (args->pte_pfn == ULONG_MAX)
		return;

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	barrier();
	pte_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}
static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}
static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
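/*
 * Context for the soft dirty tests above: the soft dirty bit tracks
 * pages written since the bit was last cleared through
 * /proc/pid/clear_refs, with userspace checkpoint/restore (CRIU) as
 * its main consumer. Both the present and the swap entry variants of
 * the bit are exercised.
 */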
static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
05289402 AK |
849 | { |
850 | struct page *page; | |
851 | swp_entry_t swp; | |
852 | ||
853 | if (!IS_ENABLED(CONFIG_MIGRATION)) | |
854 | return; | |
6315df41 | 855 | |
05289402 AK |
856 | /* |
857 | * swap_migration_tests() requires a dedicated page as it needs to | |
858 | * be locked before creating a migration entry from it. Locking the | |
859 | * page that actually maps kernel text ('start_kernel') can be real | |
4878a888 GS |
860 | * problematic. Lets use the allocated page explicitly for this |
861 | * purpose. | |
05289402 | 862 | */ |
4878a888 GS |
863 | page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL; |
864 | if (!page) | |
05289402 | 865 | return; |
4878a888 GS |
866 | |
867 | pr_debug("Validating swap migration\n"); | |
05289402 AK |
868 | |
869 | /* | |
870 | * make_migration_entry() expects given page to be | |
871 | * locked, otherwise it stumbles upon a BUG_ON(). | |
872 | */ | |
873 | __SetPageLocked(page); | |
4dd845b5 | 874 | swp = make_writable_migration_entry(page_to_pfn(page)); |
05289402 | 875 | WARN_ON(!is_migration_entry(swp)); |
4dd845b5 | 876 | WARN_ON(!is_writable_migration_entry(swp)); |
05289402 | 877 | |
4dd845b5 | 878 | swp = make_readable_migration_entry(swp_offset(swp)); |
05289402 | 879 | WARN_ON(!is_migration_entry(swp)); |
4dd845b5 | 880 | WARN_ON(is_writable_migration_entry(swp)); |
05289402 | 881 | |
4dd845b5 | 882 | swp = make_readable_migration_entry(page_to_pfn(page)); |
05289402 | 883 | WARN_ON(!is_migration_entry(swp)); |
4dd845b5 | 884 | WARN_ON(is_writable_migration_entry(swp)); |
05289402 | 885 | __ClearPageLocked(page); |
05289402 AK |
886 | } |
887 | ||
#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must still return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}
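/*
 * A quick sketch of the arithmetic above, with illustrative numbers
 * only: given a TASK_SIZE of 128TB, a FIRST_USER_ADDRESS of 0 and 4K
 * pages, the pool is 2^35 pages and any one of them may be picked, so
 * the helpers get validated at an arbitrary user address rather than
 * a hand-picked one.
 */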
static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_pages(page, 0);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}
static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order >= MAX_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order < MAX_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}
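/*
 * Why the MAX_ORDER split above: the buddy allocator cannot satisfy a
 * single allocation of order >= MAX_ORDER, which a PUD sized huge page
 * (order HPAGE_PUD_SHIFT - PAGE_SHIFT) typically is, so such requests
 * fall back to alloc_contig_pages() when CONFIG_CONTIG_ALLOC is
 * available. is_contiguous_page records which allocator was used so
 * that destroy_args() frees through the matching release path.
 */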
static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	phys_addr_t phys;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr = get_random_vaddr();
	args->page_prot = vm_get_page_prot(VMFLAGS);
	args->page_prot_none = __P000;
	args->is_contiguous_page = false;
	args->pud_pfn = ULONG_MAX;
	args->pmd_pfn = ULONG_MAX;
	args->pte_pfn = ULONG_MAX;
	args->fixed_pgd_pfn = ULONG_MAX;
	args->fixed_p4d_pfn = ULONG_MAX;
	args->fixed_pud_pfn = ULONG_MAX;
	args->fixed_pmd_pfn = ULONG_MAX;
	args->fixed_pte_pfn = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * avoids the need to allocate large memory blocks for mappings at
	 * higher page table levels in some of the tests.
	 */
	phys = __pa_symbol(&start_kernel);
	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_pages(GFP_KERNEL, 0);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}
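/*
 * Note: the driver below runs once at boot as a late_initcall when the
 * kernel is built with CONFIG_DEBUG_VM_PGTABLE=y; any helper that
 * violates the documented semantics shows up as a WARN_ON() splat in
 * dmesg rather than as a test-framework report.
 */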
static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pgtable_t saved_ptep;
	pgprot_t prot;
	phys_addr_t paddr;
	unsigned long vaddr, pmd_aligned;
	unsigned long pud_aligned;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * avoids the need to allocate large memory blocks for mappings at
	 * higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over the protection_map[] to make sure that all
	 * the basic page table transformation validations just hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out from
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_savedwrite_tests(&args);
	pmd_savedwrite_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * the proper page table lock.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(mm, pmdp);
	pmd_clear_tests(mm, pmdp);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pmd_populate_tests(mm, pmdp, saved_ptep);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);