// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
/*
 * We want to know the real level where an entry is located, ignoring any
 * folding of levels which may be happening. For example, if p4d is folded
 * then a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}
static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		pte = pte_offset_map(pmd, addr);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap(pte);
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap_unlock(pte, ptl);
	}

	return err;
}
#ifdef CONFIG_ARCH_HAS_HUGEPD
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk, int pdshift)
{
	int err = 0;
	const struct mm_walk_ops *ops = walk->ops;
	int shift = hugepd_shift(*phpd);
	int page_size = 1 << shift;

	if (!ops->pte_entry)
		return 0;

	if (addr & (page_size - 1))
		return 0;

	for (;;) {
		pte_t *pte;

		spin_lock(&walk->mm->page_table_lock);
		pte = hugepte_offset(*phpd, addr, pdshift);
		err = ops->pte_entry(pte, addr, addr + page_size, walk);
		spin_unlock(&walk->mm->page_table_lock);

		if (err)
			break;
		if (addr >= end - page_size)
			break;
		addr += page_size;
	}

	return err;
}
#else
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk, int pdshift)
{
	return 0;
}
#endif
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(3);

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pte_entry))
			continue;

		if (walk->vma) {
			split_huge_pmd(walk->vma, pmd, addr);
			if (pmd_trans_unstable(pmd))
				goto again;
		}

		if (is_hugepd(__hugepd(pmd_val(*pmd))))
			err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
		else
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
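/*
 * Illustrative sketch (not part of the original file): a ->pmd_entry()
 * handler cooperating with walk->action as described above.  It handles
 * huge pmds itself and sets ACTION_CONTINUE so the walker neither splits
 * the pmd nor descends to the pte level.  The function name is
 * hypothetical; ACTION_CONTINUE, pmd_trans_huge() and struct mm_walk are
 * the real API used by walk_pmd_range() above.
 */
static int __maybe_unused sample_pmd_entry(pmd_t *pmd, unsigned long addr,
					   unsigned long next,
					   struct mm_walk *walk)
{
	if (pmd_trans_huge(*pmd)) {
		/* ... handle the whole huge mapping here ... */
		walk->action = ACTION_CONTINUE;	/* skip the pte level */
	}
	return 0;	/* keep walking */
}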
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(2);

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pmd_entry || ops->pte_entry))
			continue;

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (is_hugepd(__hugepd(pud_val(*pud))))
			err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
		else
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(1);

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (is_hugepd(__hugepd(p4d_val(*p4d))))
			err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
		else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}
static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	if (walk->pgd)
		pgd = walk->pgd + pgd_index(addr);
	else
		pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, 0, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (is_hugepd(__hugepd(pgd_val(*pgd))))
			err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
		else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, -1, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error occurred, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind the
	 * VM_PFNMAP range, so we don't walk over it as we do for normal vmas.
	 * However, some callers are interested in handling hole ranges and
	 * they don't want to just ignore any single address range. Such
	 * users certainly define their ->pte_hole() callbacks, so let's
	 * delegate them to handle vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, -1, walk);
		return err ? err : 1;
	}
	return 0;
}
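/*
 * Illustrative sketch (not part of the original file): a ->test_walk()
 * callback following the contract described above: return 1 to skip the
 * vma, 0 to walk it, and a negative value to abort.  The function name
 * and the VM_LOCKED policy are hypothetical.
 */
static int __maybe_unused skip_mlocked_test_walk(unsigned long start,
						 unsigned long end,
						 struct mm_walk *walk)
{
	if (walk->vma->vm_flags & VM_LOCKED)
		return 1;	/* skip this vma, keep walking the rest */
	return 0;		/* walk this vma */
}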
static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (vma && ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (vma && is_vm_hugetlb_page(vma)) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (vma && ops->post_vma)
		ops->post_vma(walk);

	return err;
}
/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *
 *  - 0  : succeeded to handle the current entry, and if you don't reach the
 *         end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with caller specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with error code.
 *
 * Before starting to walk page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access the vma's data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk.vma || walk.ops->pte_hole)
			err = __walk_page_range(start, next, &walk);
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
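/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * walk_page_range() that counts present ptes in a range.  The callback,
 * ops and function names are hypothetical; the mmap_lock rule they follow
 * is the one asserted by walk_page_range() above.
 */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* 0: keep walking */
}

static const struct mm_walk_ops count_ops = {
	.pte_entry	= count_pte_entry,
};

static unsigned long __maybe_unused count_present_ptes(struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	unsigned long count = 0;

	mmap_read_lock(mm);
	walk_page_range(mm, start, end, &count_ops, &count);
	mmap_read_unlock(mm);
	return count;
}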
/*
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful
 * for walking the kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.pgd		= pgd,
		.private	= private,
		.no_vma		= true
	};

	if (start >= end || !walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	return __walk_page_range(start, end, &walk);
}
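/*
 * Illustrative sketch (not part of the original file): walking the kernel
 * page tables with walk_page_range_novma(), in the spirit of the ptdump
 * code.  The ops and function names are hypothetical, and taking
 * init_mm's mmap_lock in read mode is an assumption made only to satisfy
 * the mmap_assert_locked() check above.
 */
static int kernel_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	/* Inspect the raw entry; no pte lock is taken on this path. */
	return 0;
}

static const struct mm_walk_ops kernel_walk_ops = {
	.pte_entry	= kernel_pte_entry,
};

static int __maybe_unused walk_kernel_range(unsigned long start,
					    unsigned long end)
{
	int err;

	mmap_read_lock(&init_mm);
	err = walk_page_range_novma(&init_mm, start, end, &kernel_walk_ops,
				    NULL, NULL);
	mmap_read_unlock(&init_mm);
	return err;
}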
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};
	int err;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping: Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr: Number of incremental page offsets to cover
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * handled in its entirety.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_lock is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_lock is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.private	= private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}
);