// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * This is an optimization for the KASAN=y case. Since all KASAN page
 * tables eventually point to the kasan_early_shadow_page, we can call
 * note_page() right away without walking through the lower-level page
 * tables. This saves us dozens of seconds (minutes for a 5-level
 * config) when checking for W+X mappings or reading the
 * kernel_page_tables debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
                                        unsigned long addr)
{
        struct ptdump_state *st = walk->private;

        st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));
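
        /*
         * Every entry below this one maps the same KASAN early shadow
         * page, so tell the walker not to descend any further.
         */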
        walk->action = ACTION_CONTINUE;

        return 0;
}
#endif
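
/*
 * One callback per page-table level, numbered from the top: 0 = pgd,
 * 1 = p4d, 2 = pud, 3 = pmd, 4 = pte. note_page() is called only for
 * leaf entries; non-leaf entries are descended into by the page walker,
 * and unmapped ranges are reported through ptdump_hole() below. When
 * the caller supplies an effective_prot() callback, it is handed every
 * entry's value on the way down so it can accumulate the effective
 * protection bits.
 */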
static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
{
        struct ptdump_state *st = walk->private;
        pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && \
                (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
        if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
                return note_kasan_page_table(walk, addr);
#endif

        if (st->effective_prot)
                st->effective_prot(st, 0, pgd_val(val));

        if (pgd_leaf(val))
                st->note_page(st, addr, 0, pgd_val(val));

        return 0;
}

static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
{
        struct ptdump_state *st = walk->private;
        p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && \
                (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
        if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
                return note_kasan_page_table(walk, addr);
#endif

        if (st->effective_prot)
                st->effective_prot(st, 1, p4d_val(val));

        if (p4d_leaf(val))
                st->note_page(st, addr, 1, p4d_val(val));

        return 0;
}

static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
{
        struct ptdump_state *st = walk->private;
        pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && \
                (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
        if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
                return note_kasan_page_table(walk, addr);
#endif

        if (st->effective_prot)
                st->effective_prot(st, 2, pud_val(val));

        if (pud_leaf(val))
                st->note_page(st, addr, 2, pud_val(val));

        return 0;
}

static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
{
        struct ptdump_state *st = walk->private;
        pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
        if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
                return note_kasan_page_table(walk, addr);
#endif

        if (st->effective_prot)
                st->effective_prot(st, 3, pmd_val(val));

        if (pmd_leaf(val))
                st->note_page(st, addr, 3, pmd_val(val));

        return 0;
}

static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
{
        struct ptdump_state *st = walk->private;
        pte_t val = ptep_get(pte);

        if (st->effective_prot)
                st->effective_prot(st, 4, pte_val(val));

        st->note_page(st, addr, 4, pte_val(val));

        return 0;
}
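
/*
 * Called for unmapped ranges: @depth is the level at which the hole was
 * found, and the zero value lets note_page() distinguish holes from
 * present entries.
 */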
static int ptdump_hole(unsigned long addr, unsigned long next,
                       int depth, struct mm_walk *walk)
{
        struct ptdump_state *st = walk->private;

        st->note_page(st, addr, depth, 0);

        return 0;
}

static const struct mm_walk_ops ptdump_ops = {
        .pgd_entry = ptdump_pgd_entry,
        .p4d_entry = ptdump_p4d_entry,
        .pud_entry = ptdump_pud_entry,
        .pmd_entry = ptdump_pmd_entry,
        .pte_entry = ptdump_pte_entry,
        .pte_hole  = ptdump_hole,
};
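
/*
 * Walk the page tables rooted at @pgd (or at @mm's pgd when @pgd is
 * NULL) over each range in st->range; the range array must be
 * terminated by an entry whose start equals its end.
 * walk_page_range_novma() expects the mmap lock to be held, hence the
 * mmap_read_lock()/mmap_read_unlock() pair around the loop.
 */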
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
        const struct ptdump_range *range = st->range;

        mmap_read_lock(mm);
        while (range->start != range->end) {
                walk_page_range_novma(mm, range->start, range->end,
                                      &ptdump_ops, pgd, st);
                range++;
        }
        mmap_read_unlock(mm);

        /* Flush out the last page */
        st->note_page(st, 0, -1, 0);
}
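
/*
 * Illustrative sketch only, not part of this file: a minimal consumer
 * of the interface above. All example_* names are hypothetical; real
 * users live under arch/, e.g. arch/arm64/mm/ptdump.c. It assumes the
 * note_page() signature used throughout this file and dumps one
 * hypothetical 1 GiB window of the kernel's own page tables.
 */
static void example_note_page(struct ptdump_state *st, unsigned long addr,
                              int level, u64 val)
{
        /* level runs from 0 (pgd) to 4 (pte), or -1 for the final flush. */
        if (level >= 0 && val)
                pr_info("leaf entry at %#lx, level %d, val %#llx\n",
                        addr, level, val);
}

static const struct ptdump_range example_ranges[] = {
        {PAGE_OFFSET, PAGE_OFFSET + (1UL << 30)},  /* hypothetical window */
        {0, 0}                                     /* start == end: stop */
};

static struct ptdump_state example_st = {
        .note_page = example_note_page,
        /* .effective_prot is optional and left unset here. */
        .range = example_ranges,
};

static void example_dump(void)
{
        /* A NULL pgd means "walk init_mm's own pgd". */
        ptdump_walk_pgd(&example_st, &init_mm, NULL);
}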