// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;
        const struct mm_walk_ops *ops = walk->ops;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                if (addr >= end - PAGE_SIZE)
                        break;
                addr += PAGE_SIZE;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || !walk->vma) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 */
                if (ops->pmd_entry)
                        err = ops->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if (!ops->pte_entry)
                        continue;

                split_huge_pmd(walk->vma, pmd, addr);
                if (pmd_trans_unstable(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}
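
/*
 * Illustrative sketch, not part of this file: per the comment in
 * walk_pmd_range() above, a ->pmd_entry() handler must itself recognize
 * transparent huge pmds, e.g. by taking the pmd lock before deciding
 * whether to handle the whole huge range or leave it for pte_entry().
 * The name example_pmd_entry is hypothetical.
 */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);

        if (ptl) {
                /* A huge pmd maps [addr, next) as one entry; handle it here. */
                spin_unlock(ptl);
                return 0;
        }
        /* Normal pmd (or already split); pte_entry() will see the ptes. */
        return 0;
}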

static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        pud = pud_offset(p4d, addr);
        do {
 again:
                next = pud_addr_end(addr, end);
                if (pud_none(*pud) || !walk->vma) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }

                if (ops->pud_entry) {
                        spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

                        if (ptl) {
                                err = ops->pud_entry(pud, addr, next, walk);
                                spin_unlock(ptl);
                                if (err)
                                        break;
                                continue;
                        }
                }

                split_huge_pud(walk->vma, pud, addr);
                if (pud_none(*pud))
                        goto again;

                if (ops->pmd_entry || ops->pte_entry)
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        p4d_t *p4d;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (ops->pmd_entry || ops->pte_entry)
                        err = walk_pud_range(p4d, addr, next, walk);
                if (err)
                        break;
        } while (p4d++, addr = next, addr != end);

        return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (ops->pmd_entry || ops->pte_entry)
                        err = walk_p4d_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        unsigned long sz = huge_page_size(h);
        pte_t *pte;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask, sz);

                if (pte)
                        err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
                else if (ops->pte_hole)
                        err = ops->pte_hole(addr, next, walk);

                if (err)
                        break;
        } while (addr = next, addr != end);

        return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
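
/*
 * Illustrative sketch, not part of this file: a minimal ->hugetlb_entry()
 * callback. Each invocation covers one huge page; (addr & hmask) is the
 * huge-page-aligned base of the range [addr, next). The name
 * example_hugetlb_entry and the use of @private as a byte counter are
 * hypothetical.
 */
#ifdef CONFIG_HUGETLB_PAGE
static int example_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                 unsigned long addr, unsigned long next,
                                 struct mm_walk *walk)
{
        unsigned long *resident = walk->private;

        if (pte_present(huge_ptep_get(pte)))
                *resident += next - addr;       /* bytes backed by this page */

        return 0;       /* keep walking */
}
#endif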

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. Negative values mean
 * error, where we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
                          struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        const struct mm_walk_ops *ops = walk->ops;

        if (ops->test_walk)
                return ops->test_walk(start, end, walk);

        /*
         * A vma with VM_PFNMAP doesn't have any valid struct pages behind
         * its range, so we don't walk over it as we do for normal vmas.
         * However, some callers are interested in handling hole ranges and
         * don't want to just ignore any single address range. Such users
         * certainly define their ->pte_hole() callbacks, so let's delegate
         * them to handle vma(VM_PFNMAP).
         */
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;
                if (ops->pte_hole)
                        err = ops->pte_hole(start, end, walk);
                return err ? err : 1;
        }
        return 0;
}

static int __walk_page_range(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;

        if (vma && is_vm_hugetlb_page(vma)) {
                if (walk->ops->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm: mm_struct representing the target process of page table walk
 * @start: start address of the virtual address range
 * @end: end address of the virtual address range
 * @ops: operations to call during the walk
 * @private: private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *
 *  - 0  : the current entry was handled; if the end address has not been
 *         reached yet, continue the walk.
 *  - >0 : the current entry was handled, and the walk returns to the caller
 *         with this caller-specific value.
 *  - <0 : handling the current entry failed, and the walk returns to the
 *         caller with this error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @mm->mmap_sem, because these functions traverse the vma list and/or
 *   access the vma's data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
                unsigned long end, const struct mm_walk_ops *ops,
                void *private)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;
        struct mm_walk walk = {
                .ops = ops,
                .mm = mm,
                .private = private,
        };

        if (start >= end)
                return -EINVAL;

        if (!walk.mm)
                return -EINVAL;

        lockdep_assert_held(&walk.mm->mmap_sem);

        vma = find_vma(walk.mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk.vma = NULL;
                        next = end;
                } else if (start < vma->vm_start) { /* outside vma */
                        walk.vma = NULL;
                        next = min(end, vma->vm_start);
                } else { /* inside vma */
                        walk.vma = vma;
                        next = min(end, vma->vm_end);
                        vma = vma->vm_next;

                        err = walk_page_test(start, next, &walk);
                        if (err > 0) {
                                /*
                                 * positive return values are purely for
                                 * controlling the pagewalk, so should never
                                 * be passed to the callers.
                                 */
                                err = 0;
                                continue;
                        }
                        if (err < 0)
                                break;
                }
                if (walk.vma || walk.ops->pte_hole)
                        err = __walk_page_range(start, next, &walk);
                if (err)
                        break;
        } while (start = next, start < end);
        return err;
}
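
/*
 * Illustrative sketch, not part of this file: a complete caller of
 * walk_page_range() that counts present ptes in a range, passing the
 * counter through @private. All names here (count_pte_entry, count_ops,
 * count_present_ptes) are hypothetical.
 */
static int count_pte_entry(pte_t *pte, unsigned long addr,
                           unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;       /* 0 means: keep walking */
}

static const struct mm_walk_ops count_ops = {
        .pte_entry = count_pte_entry,
};

/* The caller must hold mm->mmap_sem across the walk (see Locking above). */
static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        unsigned long count = 0;

        walk_page_range(mm, start, end, &count_ops, &count);
        return count;
}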

int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                void *private)
{
        struct mm_walk walk = {
                .ops = ops,
                .mm = vma->vm_mm,
                .vma = vma,
                .private = private,
        };
        int err;

        if (!walk.mm)
                return -EINVAL;

        lockdep_assert_held(&walk.mm->mmap_sem);

        err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
        if (err > 0)
                return 0;
        if (err < 0)
                return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
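
/*
 * Illustrative sketch, not part of this file: a ->test_walk() callback
 * usable with walk_page_vma() or walk_page_range() to skip mlocked vmas.
 * Per walk_page_test() above, returning 1 skips the vma, 0 walks it, and
 * a negative value aborts the walk. The name skip_mlocked_vmas is
 * hypothetical.
 */
static int skip_mlocked_vmas(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        return (walk->vma->vm_flags & VM_LOCKED) ? 1 : 0;
}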