/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
14
fe770bf0 15#include <linux/debugfs.h>
04b67022 16#include <linux/kasan.h>
fe770bf0 17#include <linux/mm.h>
84e629b6 18#include <linux/init.h>
146fbb76 19#include <linux/sched.h>
926e5392 20#include <linux/seq_file.h>
dfe06429 21#include <linux/pci.h>
926e5392 22
dfe06429 23#include <asm/e820/types.h>
926e5392
AV
24#include <asm/pgtable.h>
25
26/*
27 * The dumper groups pagetable entries of the same type into one, and for
28 * that it needs to keep some state when walking, and flush this state
29 * when a "break" in the continuity is found.
30 */
31struct pg_state {
32 int level;
33 pgprot_t current_prot;
34 unsigned long start_address;
35 unsigned long current_address;
fe770bf0 36 const struct addr_marker *marker;
3891a04a 37 unsigned long lines;
ef6bea6d 38 bool to_dmesg;
e1a58320
SS
39 bool check_wx;
40 unsigned long wx_pages;
926e5392
AV
41};
42
fe770bf0
PA
43struct addr_marker {
44 unsigned long start_address;
45 const char *name;
3891a04a 46 unsigned long max_lines;
fe770bf0
PA
47};
48
146122e2
TG
49/* Address space markers hints */
50
51#ifdef CONFIG_X86_64
52
92851e2f
AS
53enum address_markers_idx {
54 USER_SPACE_NR = 0,
92851e2f 55 KERNEL_SPACE_NR,
c6b2363a 56#ifdef CONFIG_MODIFY_LDT_SYSCALL
f55f0501
AL
57 LDT_NR,
58#endif
c6b2363a 59 LOW_KERNEL_NR,
92851e2f
AS
60 VMALLOC_START_NR,
61 VMEMMAP_START_NR,
025205f8
AR
62#ifdef CONFIG_KASAN
63 KASAN_SHADOW_START_NR,
64 KASAN_SHADOW_END_NR,
f55f0501 65#endif
f2078904 66 CPU_ENTRY_AREA_NR,
146122e2 67#ifdef CONFIG_X86_ESPFIX64
3891a04a 68 ESPFIX_START_NR,
146122e2
TG
69#endif
70#ifdef CONFIG_EFI
71 EFI_END_NR,
72#endif
92851e2f
AS
73 HIGH_KERNEL_NR,
74 MODULES_VADDR_NR,
75 MODULES_END_NR,
146122e2
TG
76 FIXADDR_START_NR,
77 END_OF_SPACE_NR,
78};
79
80static struct addr_marker address_markers[] = {
81 [USER_SPACE_NR] = { 0, "User Space" },
82 [KERNEL_SPACE_NR] = { (1UL << 63), "Kernel Space" },
83 [LOW_KERNEL_NR] = { 0UL, "Low Kernel Mapping" },
84 [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" },
85 [VMEMMAP_START_NR] = { 0UL, "Vmemmap" },
86#ifdef CONFIG_KASAN
87 [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" },
88 [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" },
f55f0501
AL
89#endif
90#ifdef CONFIG_MODIFY_LDT_SYSCALL
91 [LDT_NR] = { LDT_BASE_ADDR, "LDT remap" },
146122e2 92#endif
92a0f81d 93 [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
146122e2
TG
94#ifdef CONFIG_X86_ESPFIX64
95 [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
96#endif
97#ifdef CONFIG_EFI
98 [EFI_END_NR] = { EFI_VA_END, "EFI Runtime Services" },
99#endif
100 [HIGH_KERNEL_NR] = { __START_KERNEL_map, "High Kernel Mapping" },
101 [MODULES_VADDR_NR] = { MODULES_VADDR, "Modules" },
102 [MODULES_END_NR] = { MODULES_END, "End Modules" },
103 [FIXADDR_START_NR] = { FIXADDR_START, "Fixmap Area" },
104 [END_OF_SPACE_NR] = { -1, NULL }
105};
106
a5673bd0
JR
107#define INIT_PGD ((pgd_t *) &init_top_pgt)
108
146122e2
TG
109#else /* CONFIG_X86_64 */
110
111enum address_markers_idx {
112 USER_SPACE_NR = 0,
92851e2f
AS
113 KERNEL_SPACE_NR,
114 VMALLOC_START_NR,
115 VMALLOC_END_NR,
146122e2 116#ifdef CONFIG_HIGHMEM
92851e2f 117 PKMAP_BASE_NR,
02b91bfd
JR
118#endif
119#ifdef CONFIG_MODIFY_LDT_SYSCALL
120 LDT_NR,
92851e2f 121#endif
92a0f81d 122 CPU_ENTRY_AREA_NR,
146122e2
TG
123 FIXADDR_START_NR,
124 END_OF_SPACE_NR,
92851e2f
AS
125};
126
fe770bf0 127static struct addr_marker address_markers[] = {
146122e2
TG
128 [USER_SPACE_NR] = { 0, "User Space" },
129 [KERNEL_SPACE_NR] = { PAGE_OFFSET, "Kernel Mapping" },
130 [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" },
131 [VMALLOC_END_NR] = { 0UL, "vmalloc() End" },
132#ifdef CONFIG_HIGHMEM
133 [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" },
02b91bfd
JR
134#endif
135#ifdef CONFIG_MODIFY_LDT_SYSCALL
136 [LDT_NR] = { 0UL, "LDT remap" },
fe770bf0 137#endif
92a0f81d 138 [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" },
146122e2
TG
139 [FIXADDR_START_NR] = { 0UL, "Fixmap area" },
140 [END_OF_SPACE_NR] = { -1, NULL }
fe770bf0 141};
926e5392 142
a5673bd0
JR
143#define INIT_PGD (swapper_pg_dir)
144
146122e2
TG
145#endif /* !CONFIG_X86_64 */
146
fe770bf0
PA
147/* Multipliers for offsets within the PTEs */
148#define PTE_LEVEL_MULT (PAGE_SIZE)
149#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
150#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
fdd3d8ce 151#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
84bbabc3 152#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
926e5392 153
ef6bea6d
BP
154#define pt_dump_seq_printf(m, to_dmesg, fmt, args...) \
155({ \
156 if (to_dmesg) \
157 printk(KERN_INFO fmt, ##args); \
158 else \
159 if (m) \
160 seq_printf(m, fmt, ##args); \
161})
162
163#define pt_dump_cont_printf(m, to_dmesg, fmt, args...) \
164({ \
165 if (to_dmesg) \
166 printk(KERN_CONT fmt, ##args); \
167 else \
168 if (m) \
169 seq_printf(m, fmt, ##args); \
170})
171
926e5392
AV
172/*
173 * Print a readable form of a pgprot_t to the seq_file
174 */
ef6bea6d 175static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
926e5392 176{
fe770bf0
PA
177 pgprotval_t pr = pgprot_val(prot);
178 static const char * const level_name[] =
45dcd209 179 { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };
fe770bf0 180
c0534494 181 if (!(pr & _PAGE_PRESENT)) {
fe770bf0 182 /* Not present */
f439c429 183 pt_dump_cont_printf(m, dmsg, " ");
fe770bf0
PA
184 } else {
185 if (pr & _PAGE_USER)
ef6bea6d 186 pt_dump_cont_printf(m, dmsg, "USR ");
926e5392 187 else
ef6bea6d 188 pt_dump_cont_printf(m, dmsg, " ");
fe770bf0 189 if (pr & _PAGE_RW)
ef6bea6d 190 pt_dump_cont_printf(m, dmsg, "RW ");
fe770bf0 191 else
ef6bea6d 192 pt_dump_cont_printf(m, dmsg, "ro ");
fe770bf0 193 if (pr & _PAGE_PWT)
ef6bea6d 194 pt_dump_cont_printf(m, dmsg, "PWT ");
fe770bf0 195 else
ef6bea6d 196 pt_dump_cont_printf(m, dmsg, " ");
fe770bf0 197 if (pr & _PAGE_PCD)
ef6bea6d 198 pt_dump_cont_printf(m, dmsg, "PCD ");
926e5392 199 else
ef6bea6d 200 pt_dump_cont_printf(m, dmsg, " ");
fe770bf0 201
f439c429 202 /* Bit 7 has a different meaning on level 3 vs 4 */
45dcd209 203 if (level <= 4 && pr & _PAGE_PSE)
f439c429
JG
204 pt_dump_cont_printf(m, dmsg, "PSE ");
205 else
206 pt_dump_cont_printf(m, dmsg, " ");
45dcd209
KS
207 if ((level == 5 && pr & _PAGE_PAT) ||
208 ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
da25e628 209 pt_dump_cont_printf(m, dmsg, "PAT ");
f439c429
JG
210 else
211 pt_dump_cont_printf(m, dmsg, " ");
fe770bf0 212 if (pr & _PAGE_GLOBAL)
ef6bea6d 213 pt_dump_cont_printf(m, dmsg, "GLB ");
fe770bf0 214 else
ef6bea6d 215 pt_dump_cont_printf(m, dmsg, " ");
fe770bf0 216 if (pr & _PAGE_NX)
ef6bea6d 217 pt_dump_cont_printf(m, dmsg, "NX ");
fe770bf0 218 else
ef6bea6d 219 pt_dump_cont_printf(m, dmsg, "x ");
926e5392 220 }
ef6bea6d 221 pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
926e5392
AV
222}
223
224/*
fe770bf0 225 * On 64 bits, sign-extend the 48 bit address to 64 bit
926e5392 226 */
fe770bf0 227static unsigned long normalize_addr(unsigned long u)
926e5392 228{
3a366f79
KS
229 int shift;
230 if (!IS_ENABLED(CONFIG_X86_64))
231 return u;
232
233 shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
234 return (signed long)(u << shift) >> shift;
926e5392
AV
235}
236
dfe06429
TG
237static void note_wx(struct pg_state *st)
238{
239 unsigned long npages;
240
241 npages = (st->current_address - st->start_address) / PAGE_SIZE;
242
243#ifdef CONFIG_PCI_BIOS
244 /*
245 * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
246 * Inform about it, but avoid the warning.
247 */
248 if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
249 st->current_address <= PAGE_OFFSET + BIOS_END) {
250 pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
251 return;
252 }
253#endif
254 /* Account the WX pages */
255 st->wx_pages += npages;
256 WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
257 (void *)st->start_address);
258}
259
926e5392
AV
260/*
261 * This function gets called on a break in a continuous series
262 * of PTE entries; the next one is different so we need to
263 * print what we collected so far.
264 */
265static void note_page(struct seq_file *m, struct pg_state *st,
fe770bf0 266 pgprot_t new_prot, int level)
926e5392 267{
fe770bf0 268 pgprotval_t prot, cur;
3891a04a 269 static const char units[] = "BKMGTPE";
926e5392
AV
270
271 /*
272 * If we have a "break" in the series, we need to flush the state that
fe770bf0
PA
273 * we have now. "break" is either changing perms, levels or
274 * address space marker.
926e5392 275 */
da25e628
TK
276 prot = pgprot_val(new_prot);
277 cur = pgprot_val(st->current_prot);
926e5392 278
fe770bf0
PA
279 if (!st->level) {
280 /* First entry */
281 st->current_prot = new_prot;
282 st->level = level;
283 st->marker = address_markers;
3891a04a 284 st->lines = 0;
ef6bea6d
BP
285 pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
286 st->marker->name);
fe770bf0
PA
287 } else if (prot != cur || level != st->level ||
288 st->current_address >= st->marker[1].start_address) {
289 const char *unit = units;
926e5392 290 unsigned long delta;
6424fb38 291 int width = sizeof(unsigned long) * 2;
e1a58320
SS
292 pgprotval_t pr = pgprot_val(st->current_prot);
293
dfe06429
TG
294 if (st->check_wx && (pr & _PAGE_RW) && !(pr & _PAGE_NX))
295 note_wx(st);
926e5392 296
926e5392
AV
297 /*
298 * Now print the actual finished series
299 */
3891a04a
PA
300 if (!st->marker->max_lines ||
301 st->lines < st->marker->max_lines) {
302 pt_dump_seq_printf(m, st->to_dmesg,
303 "0x%0*lx-0x%0*lx ",
304 width, st->start_address,
305 width, st->current_address);
926e5392 306
3891a04a
PA
307 delta = st->current_address - st->start_address;
308 while (!(delta & 1023) && unit[1]) {
309 delta >>= 10;
310 unit++;
311 }
312 pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
313 delta, *unit);
314 printk_prot(m, st->current_prot, st->level,
315 st->to_dmesg);
926e5392 316 }
3891a04a 317 st->lines++;
fe770bf0
PA
318
319 /*
320 * We print markers for special areas of address space,
321 * such as the start of vmalloc space etc.
322 * This helps in the interpretation.
323 */
324 if (st->current_address >= st->marker[1].start_address) {
3891a04a
PA
325 if (st->marker->max_lines &&
326 st->lines > st->marker->max_lines) {
327 unsigned long nskip =
328 st->lines - st->marker->max_lines;
329 pt_dump_seq_printf(m, st->to_dmesg,
330 "... %lu entr%s skipped ... \n",
331 nskip,
332 nskip == 1 ? "y" : "ies");
333 }
fe770bf0 334 st->marker++;
3891a04a 335 st->lines = 0;
ef6bea6d
BP
336 pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
337 st->marker->name);
926e5392 338 }
fe770bf0 339
926e5392
AV
340 st->start_address = st->current_address;
341 st->current_prot = new_prot;
342 st->level = level;
fe770bf0 343 }
926e5392
AV
344}
345
fdd3d8ce 346static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, unsigned long P)
926e5392
AV
347{
348 int i;
349 pte_t *start;
da25e628 350 pgprotval_t prot;
926e5392 351
fdd3d8ce 352 start = (pte_t *)pmd_page_vaddr(addr);
926e5392 353 for (i = 0; i < PTRS_PER_PTE; i++) {
da25e628 354 prot = pte_flags(*start);
fe770bf0 355 st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
45dcd209 356 note_page(m, st, __pgprot(prot), 5);
926e5392
AV
357 start++;
358 }
359}
04b67022
AR
360#ifdef CONFIG_KASAN
361
362/*
363 * This is an optimization for KASAN=y case. Since all kasan page tables
364 * eventually point to the kasan_zero_page we could call note_page()
365 * right away without walking through lower level page tables. This saves
366 * us dozens of seconds (minutes for 5-level config) while checking for
367 * W+X mapping or reading kernel_page_tables debugfs file.
368 */
369static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
370 void *pt)
371{
372 if (__pa(pt) == __pa(kasan_zero_pmd) ||
373#ifdef CONFIG_X86_5LEVEL
374 __pa(pt) == __pa(kasan_zero_p4d) ||
375#endif
376 __pa(pt) == __pa(kasan_zero_pud)) {
377 pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
378 note_page(m, st, __pgprot(prot), 5);
379 return true;
380 }
381 return false;
382}
383#else
384static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
385 void *pt)
386{
387 return false;
388}
389#endif
926e5392 390
fe770bf0 391#if PTRS_PER_PMD > 1
926e5392 392
fdd3d8ce 393static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, unsigned long P)
926e5392
AV
394{
395 int i;
04b67022 396 pmd_t *start, *pmd_start;
da25e628 397 pgprotval_t prot;
926e5392 398
04b67022 399 pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
926e5392 400 for (i = 0; i < PTRS_PER_PMD; i++) {
fe770bf0 401 st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
926e5392 402 if (!pmd_none(*start)) {
da25e628
TK
403 if (pmd_large(*start) || !pmd_present(*start)) {
404 prot = pmd_flags(*start);
45dcd209 405 note_page(m, st, __pgprot(prot), 4);
04b67022 406 } else if (!kasan_page_table(m, st, pmd_start)) {
fe770bf0
PA
407 walk_pte_level(m, st, *start,
408 P + i * PMD_LEVEL_MULT);
da25e628 409 }
926e5392 410 } else
45dcd209 411 note_page(m, st, __pgprot(0), 4);
926e5392
AV
412 start++;
413 }
414}
415
fe770bf0
PA
416#else
417#define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p)
418#define pud_large(a) pmd_large(__pmd(pud_val(a)))
419#define pud_none(a) pmd_none(__pmd(pud_val(a)))
420#endif
926e5392 421
fe770bf0
PA
422#if PTRS_PER_PUD > 1
423
fdd3d8ce 424static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, unsigned long P)
926e5392
AV
425{
426 int i;
04b67022 427 pud_t *start, *pud_start;
da25e628 428 pgprotval_t prot;
243b72aa 429 pud_t *prev_pud = NULL;
926e5392 430
04b67022 431 pud_start = start = (pud_t *)p4d_page_vaddr(addr);
926e5392
AV
432
433 for (i = 0; i < PTRS_PER_PUD; i++) {
fe770bf0 434 st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
04b67022 435 if (!pud_none(*start)) {
da25e628
TK
436 if (pud_large(*start) || !pud_present(*start)) {
437 prot = pud_flags(*start);
45dcd209 438 note_page(m, st, __pgprot(prot), 3);
04b67022 439 } else if (!kasan_page_table(m, st, pud_start)) {
fe770bf0
PA
440 walk_pmd_level(m, st, *start,
441 P + i * PUD_LEVEL_MULT);
da25e628 442 }
926e5392 443 } else
45dcd209 444 note_page(m, st, __pgprot(0), 3);
926e5392 445
243b72aa 446 prev_pud = start;
926e5392
AV
447 start++;
448 }
449}
450
fe770bf0 451#else
fdd3d8ce
KS
452#define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud(p4d_val(a)),p)
453#define p4d_large(a) pud_large(__pud(p4d_val(a)))
454#define p4d_none(a) pud_none(__pud(p4d_val(a)))
455#endif
456
457#if PTRS_PER_P4D > 1
458
459static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, unsigned long P)
460{
461 int i;
04b67022 462 p4d_t *start, *p4d_start;
fdd3d8ce
KS
463 pgprotval_t prot;
464
04b67022 465 p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);
fdd3d8ce
KS
466
467 for (i = 0; i < PTRS_PER_P4D; i++) {
468 st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
469 if (!p4d_none(*start)) {
470 if (p4d_large(*start) || !p4d_present(*start)) {
471 prot = p4d_flags(*start);
472 note_page(m, st, __pgprot(prot), 2);
04b67022 473 } else if (!kasan_page_table(m, st, p4d_start)) {
fdd3d8ce
KS
474 walk_pud_level(m, st, *start,
475 P + i * P4D_LEVEL_MULT);
476 }
477 } else
478 note_page(m, st, __pgprot(0), 2);
479
480 start++;
481 }
482}
483
484#else
485#define walk_p4d_level(m,s,a,p) walk_pud_level(m,s,__p4d(pgd_val(a)),p)
486#define pgd_large(a) p4d_large(__p4d(pgd_val(a)))
487#define pgd_none(a) p4d_none(__p4d(pgd_val(a)))
fe770bf0
PA
488#endif
489
f4e342c8
BO
490static inline bool is_hypervisor_range(int idx)
491{
b176862f 492#ifdef CONFIG_X86_64
f4e342c8 493 /*
422de96e
KS
494 * A hole in the beginning of kernel address space reserved
495 * for a hypervisor.
f4e342c8 496 */
422de96e
KS
497 return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
498 (idx < pgd_index(GUARD_HOLE_END_ADDR));
f4e342c8 499#else
b176862f 500 return false;
f4e342c8 501#endif
b176862f 502}
f4e342c8 503
e1a58320 504static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
b4bf4f92 505 bool checkwx, bool dmesg)
926e5392 506{
a5673bd0 507 pgd_t *start = INIT_PGD;
da25e628 508 pgprotval_t prot;
926e5392 509 int i;
ef6bea6d 510 struct pg_state st = {};
926e5392 511
ef6bea6d
BP
512 if (pgd) {
513 start = pgd;
b4bf4f92 514 st.to_dmesg = dmesg;
ef6bea6d 515 }
926e5392 516
e1a58320
SS
517 st.check_wx = checkwx;
518 if (checkwx)
519 st.wx_pages = 0;
520
926e5392 521 for (i = 0; i < PTRS_PER_PGD; i++) {
fe770bf0 522 st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
f4e342c8 523 if (!pgd_none(*start) && !is_hypervisor_range(i)) {
da25e628
TK
524 if (pgd_large(*start) || !pgd_present(*start)) {
525 prot = pgd_flags(*start);
fe770bf0 526 note_page(m, &st, __pgprot(prot), 1);
da25e628 527 } else {
fdd3d8ce 528 walk_p4d_level(m, &st, *start,
fe770bf0 529 i * PGD_LEVEL_MULT);
da25e628 530 }
fe770bf0 531 } else
926e5392 532 note_page(m, &st, __pgprot(0), 1);
fe770bf0 533
146fbb76 534 cond_resched();
926e5392
AV
535 start++;
536 }
fe770bf0
PA
537
538 /* Flush out the last page */
539 st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
540 note_page(m, &st, __pgprot(0), 0);
e1a58320
SS
541 if (!checkwx)
542 return;
543 if (st.wx_pages)
544 pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
545 st.wx_pages);
546 else
547 pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
548}
549
550void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
551{
b4bf4f92
TG
552 ptdump_walk_pgd_level_core(m, pgd, false, true);
553}
554
a4b51ef6 555void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
b4bf4f92 556{
a4b51ef6
TG
557#ifdef CONFIG_PAGE_TABLE_ISOLATION
558 if (user && static_cpu_has(X86_FEATURE_PTI))
559 pgd = kernel_to_user_pgdp(pgd);
560#endif
b4bf4f92
TG
561 ptdump_walk_pgd_level_core(m, pgd, false, false);
562}
563EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
564
3a2cf6d0 565void ptdump_walk_user_pgd_level_checkwx(void)
b4bf4f92
TG
566{
567#ifdef CONFIG_PAGE_TABLE_ISOLATION
a5673bd0 568 pgd_t *pgd = INIT_PGD;
b4bf4f92 569
3a2cf6d0
JR
570 if (!(__supported_pte_mask & _PAGE_NX) ||
571 !static_cpu_has(X86_FEATURE_PTI))
b4bf4f92
TG
572 return;
573
574 pr_info("x86/mm: Checking user space page tables\n");
575 pgd = kernel_to_user_pgdp(pgd);
576 ptdump_walk_pgd_level_core(NULL, pgd, true, false);
577#endif
926e5392
AV
578}
579
e1a58320
SS
580void ptdump_walk_pgd_level_checkwx(void)
581{
b4bf4f92 582 ptdump_walk_pgd_level_core(NULL, NULL, true, false);
e1a58320
SS
583}
584
8609d1b5 585static int __init pt_dump_init(void)
926e5392 586{
0483e1fa
TG
587 /*
588 * Various markers are not compile-time constants, so assign them
589 * here.
590 */
591#ifdef CONFIG_X86_64
592 address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
593 address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
594 address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
595#endif
fe770bf0 596#ifdef CONFIG_X86_32
92851e2f
AS
597 address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
598 address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
fe770bf0 599# ifdef CONFIG_HIGHMEM
92851e2f 600 address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
fe770bf0 601# endif
92851e2f 602 address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
92a0f81d 603 address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
02b91bfd
JR
604# ifdef CONFIG_MODIFY_LDT_SYSCALL
605 address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
606# endif
fe770bf0 607#endif
926e5392
AV
608 return 0;
609}
926e5392 610__initcall(pt_dump_init);