// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: %s: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <asm/pgalloc.h>

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry type. These bits can also affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries, do not
 * change the lower 4 bits. This does not affect any other platform.
 */
#define S390_MASK_BITS	4
#define RANDOM_ORVALUE	GENMASK(BITS_PER_LONG - 1, S390_MASK_BITS)
#define RANDOM_NZVALUE	GENMASK(7, 0)

static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

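	/*
	 * Setting an attribute on a cleared entry must make the
	 * corresponding test helper return true, and clearing it on a
	 * set entry must make the helper return false. This exercises
	 * both directions of the young, dirty and write accessors.
	 */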
	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

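	/*
	 * Same attribute round-trips as at PTE level, but applied to
	 * PMD entries built with pfn_pmd().
	 */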
	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
				   pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
				   pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
				   pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

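/*
 * There are no generic pfn_p4d() or pfn_pgd() helpers, so the basic
 * tests for these levels fill the entry with a non-zero byte pattern
 * and only verify that pxx_same() is reflexive.
 */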
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

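	/*
	 * Load the entry with a random value, keeping the lower bits
	 * that s390 uses for the entry type intact, then verify that
	 * pud_clear() leaves an entry that reads back as none.
	 */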
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pmd_clear(pmdp);
	pud_clear(pudp);
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long vaddr)
{
	pte_t pte = READ_ONCE(*ptep);

	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
	set_pte_at(mm, vaddr, ptep, pte);
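	/*
	 * barrier() is a compiler barrier; it keeps the store above
	 * from being reordered past or folded into pte_clear() below.
	 */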
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = READ_ONCE(*ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_clear(pmdp);
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

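/*
 * Return a random page aligned virtual address within the user address
 * range, so that each run exercises a different set of page table
 * entries.
 */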
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static int __init debug_vm_pgtable(void)
{
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *uninitialized_var(ptl);

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid large memory block allocations to be used for mapping
	 * at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

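	/*
	 * Allocate a page table page at each level for the random
	 * address. pte_alloc_map_lock() also maps the PTE page table
	 * page and acquires the page table lock.
	 */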
	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	ptep = pte_alloc_map_lock(mm, pmdp, vaddr, &ptl);

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	pte_basic_tests(pte_aligned, prot);
	pmd_basic_tests(pmd_aligned, prot);
	pud_basic_tests(mm, pud_aligned, prot);
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pte_clear_tests(mm, ptep, vaddr);
	pmd_clear_tests(mm, pmdp);
	pud_clear_tests(mm, pudp);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);

	pte_unmap_unlock(ptep, ptl);

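	/*
	 * Rebuild the entries at each level from the saved page table
	 * pages. A freshly populated entry must not qualify as pxx_bad().
	 */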
	pmd_populate_tests(mm, pmdp, saved_ptep);
	pud_populate_tests(mm, pudp, saved_pmdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);

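	/*
	 * Release the saved page table pages and roll back the page
	 * table accounting that the earlier allocations added.
	 */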
	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);