#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_pmd(walk->vma, pmd, addr);
                if (pmd_trans_unstable(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}
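
/*
 * Illustration (not part of the original file): as the comment in
 * walk_pmd_range() notes, a ->pmd_entry() handler runs before any THP split,
 * so it has to recognize huge pmds itself. A minimal sketch, assuming the
 * caller installs only a pmd_entry callback and keeps a hypothetical counter
 * in walk->private:
 *
 *      static int count_huge_pmd(pmd_t *pmd, unsigned long addr,
 *                                unsigned long end, struct mm_walk *walk)
 *      {
 *              unsigned long *nr_huge = walk->private;
 *              spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);
 *
 *              if (ptl) {                      // a huge pmd is mapped here
 *                      (*nr_huge)++;
 *                      spin_unlock(ptl);
 *              }
 *              return 0;                       // keep walking
 *      }
 *
 * A non-zero return value from the handler stops the walk, as the error
 * handling in walk_pmd_range() above shows.
 */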

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
 again:
                next = pud_addr_end(addr, end);
                if (pud_none(*pud) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }

                if (walk->pud_entry) {
                        spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

                        if (ptl) {
                                err = walk->pud_entry(pud, addr, next, walk);
                                spin_unlock(ptl);
                                if (err)
                                        break;
                                continue;
                        }
                }

                split_huge_pud(walk->vma, pud, addr);
                /* split_huge_pud() may have cleared the pud; re-check it */
                if (pud_none(*pud))
                        goto again;

                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pud_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        break;
        } while (addr = next, addr != end);

        return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
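
/*
 * Illustration (not part of the original file): a ->hugetlb_entry() handler
 * is called once per huge page, receiving the huge pte and the huge page
 * mask. A minimal sketch, assuming a hypothetical counter in walk->private:
 *
 *      static int count_hugetlb_entry(pte_t *pte, unsigned long hmask,
 *                                     unsigned long addr, unsigned long next,
 *                                     struct mm_walk *walk)
 *      {
 *              unsigned long *nr = walk->private;
 *              pte_t entry = huge_ptep_get(pte);
 *
 *              if (pte_present(entry))
 *                      (*nr)++;
 *              return 0;
 *      }
 *
 * walk_hugetlb_range() above advances addr one huge page at a time via
 * hugetlb_entry_end(), so @addr..@next covers at most one huge page.
 */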

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative return value
 * means an error and aborts the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
                          struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;

        if (walk->test_walk)
                return walk->test_walk(start, end, walk);

        /*
         * A vma with VM_PFNMAP doesn't have any valid struct pages behind
         * its range, so we don't walk over it as we do for normal vmas.
         * However, some callers are interested in handling hole ranges and
         * don't want any address range to be silently ignored. Such users
         * certainly define their ->pte_hole() callbacks, so let's delegate
         * handling of vma(VM_PFNMAP) to them.
         */
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;
                if (walk->pte_hole)
                        err = walk->pte_hole(start, end, walk);
                return err ? err : 1;
        }
        return 0;
}
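
/*
 * Illustration (not part of the original file): instead of relying on the
 * VM_PFNMAP default above, a caller may install ->test_walk() to decide per
 * vma whether to walk it. A minimal sketch, assuming the caller only cares
 * about writable vmas:
 *
 *      static int writable_only_test(unsigned long start, unsigned long end,
 *                                    struct mm_walk *walk)
 *      {
 *              if (walk->vma->vm_flags & VM_WRITE)
 *                      return 0;       // walk this vma
 *              return 1;               // skip it, keep walking the rest
 *      }
 *
 * A negative return value would abort the whole walk, matching the contract
 * described above walk_page_test().
 */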

static int __walk_page_range(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;

        if (vma && is_vm_hugetlb_page(vma)) {
                if (walk->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        return err;
}

/**
 * walk_page_range - walk page table with caller-specific callbacks
 *
 * Recursively walk the page table tree of the process represented by
 * @walk->mm within the virtual address range [@start, @end). During walking,
 * we can do some caller-specific work for each entry by setting up
 * pmd_entry(), pte_entry(), and/or hugetlb_entry(). If you don't set up some
 * of these callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as follows:
 *  - 0  : the current entry was handled; if the end address has not been
 *         reached yet, continue to walk.
 *  - >0 : the current entry was handled; return to the caller with this
 *         caller-specific value.
 *  - <0 : handling the current entry failed; return to the caller with this
 *         error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from the callbacks. If you want to pass some
 * caller-specific data to the callbacks, @walk->private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 *   and/or access vma fields.
 */
int walk_page_range(unsigned long start, unsigned long end,
                    struct mm_walk *walk)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;

        if (start >= end)
                return -EINVAL;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

        vma = find_vma(walk->mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk->vma = NULL;
                        next = end;
                } else if (start < vma->vm_start) { /* outside vma */
                        walk->vma = NULL;
                        next = min(end, vma->vm_start);
                } else { /* inside vma */
                        walk->vma = vma;
                        next = min(end, vma->vm_end);
                        vma = vma->vm_next;

                        err = walk_page_test(start, next, walk);
                        if (err > 0) {
                                /*
                                 * positive return values are purely for
                                 * controlling the pagewalk, so should never
                                 * be passed to the callers.
                                 */
                                err = 0;
                                continue;
                        }
                        if (err < 0)
                                break;
                }
                if (walk->vma || walk->pte_hole)
                        err = __walk_page_range(start, next, walk);
                if (err)
                        break;
        } while (start = next, start < end);
        return err;
}
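
/*
 * Illustration (not part of the original file): a typical caller packs its
 * callbacks and state into a struct mm_walk and invokes walk_page_range()
 * while holding mmap_sem. A minimal sketch, assuming a hypothetical pte
 * handler count_pte_entry() and an unsigned long counter:
 *
 *      unsigned long nr_ptes = 0;
 *      struct mm_walk count_walk = {
 *              .pte_entry      = count_pte_entry,
 *              .mm             = mm,
 *              .private        = &nr_ptes,
 *      };
 *      int err;
 *
 *      down_read(&mm->mmap_sem);
 *      err = walk_page_range(start, end, &count_walk);
 *      up_read(&mm->mmap_sem);
 *
 * Callbacks that are left unset (pmd_entry, hugetlb_entry, test_walk, ...)
 * are simply skipped, as described in the comment above walk_page_range().
 */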

int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
        int err;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
        VM_BUG_ON(!vma);
        walk->vma = vma;
        err = walk_page_test(vma->vm_start, vma->vm_end, walk);
        if (err > 0)
                return 0;
        if (err < 0)
                return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}