// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        /*
                         * We get here when we are trying to unmap a private
                         * device page from the process address space. Such
                         * a page is not CPU accessible and thus is mapped as
                         * a special swap entry, nonetheless it still does
                         * count as a valid regular mapping for the page (and
                         * is accounted as such in the page maps count).
                         *
                         * So handle this special case as if it was a normal
                         * page mapping, i.e. lock the CPU page table and
                         * return true.
                         *
                         * For more details on device private memory see HMM
                         * (include/linux/hmm.h or mm/hmm.c).
                         */
                        if (is_swap_pte(*pvmw->pte)) {
                                swp_entry_t entry;

                                /* Handle un-addressable ZONE_DEVICE memory */
                                entry = pte_to_swp_entry(*pvmw->pte);
                                if (!is_device_private_entry(entry) &&
                                    !is_device_exclusive_entry(entry))
                                        return false;
                        } else if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

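/*
 * Note on pfn_is_match() below (illustrative numbers, assuming 4KiB base
 * pages and PMD-sized THPs): a normal page or a hugetlbfs page only matches
 * when @pfn equals page_to_pfn(@page) exactly, while a 2MiB THP has
 * thp_nr_pages() == 512, so any pfn in [page_pfn, page_pfn + 512) counts as
 * a match, because the THP may be referenced through any of its subpages.
 */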
static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
        unsigned long page_pfn = page_to_pfn(page);

        /* normal page and hugetlbfs page */
        if (!PageTransCompound(page) || PageHuge(page))
                return page_pfn == pfn;

        /* THP can be referenced by any subpage */
        return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair of pte and page for checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;

                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry) &&
                    !is_device_exclusive_entry(entry))
                        return false;

                pfn = swp_offset(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;

                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_device_private_entry(entry) &&
                    !is_device_exclusive_entry(entry))
                        return false;

                pfn = swp_offset(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                pfn = pte_pfn(*pvmw->pte);
        }

        return pfn_is_match(pvmw->page, pfn);
}

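/*
 * Worked example for step_forward() below (illustrative numbers, assuming
 * PMD_SIZE == 2MiB): an address of 0x201000 becomes
 * (0x201000 + 0x200000) & ~(0x200000 - 1) == 0x400000, i.e. the start of
 * the next PMD-sized region.  If the addition wraps around the top of the
 * address space, the masked result is 0, which is remapped to ULONG_MAX so
 * that the caller's "while (pvmw->address < end)" loop still terminates.
 */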
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
        pvmw->address = (pvmw->address + size) & ~(size - 1);
        if (!pvmw->address)
                pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
 * adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 *
 * A caller-side usage sketch follows the function body below.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        unsigned long end;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on the last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (unlikely(PageHuge(page))) {
                /* The only possible mapping was handled on the last iteration */
                if (pvmw->pte)
                        return not_found(pvmw);

                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }

        /*
         * Seeking to the next pte only makes sense for THP.
         * But more important than that optimization is to filter out
         * any PageKsm page, whose page->index misleads vma_address()
         * and vma_address_end() to disaster.
         */
        end = PageTransCompound(page) ?
                vma_address_end(page, pvmw->vma) :
                pvmw->address + PAGE_SIZE;
        if (pvmw->pte)
                goto next_pte;
restart:
        do {
                pgd = pgd_offset(mm, pvmw->address);
                if (!pgd_present(*pgd)) {
                        step_forward(pvmw, PGDIR_SIZE);
                        continue;
                }
                p4d = p4d_offset(pgd, pvmw->address);
                if (!p4d_present(*p4d)) {
                        step_forward(pvmw, P4D_SIZE);
                        continue;
                }
                pud = pud_offset(p4d, pvmw->address);
                if (!pud_present(*pud)) {
                        step_forward(pvmw, PUD_SIZE);
                        continue;
                }

                pvmw->pmd = pmd_offset(pud, pvmw->address);
                /*
                 * Make sure the pmd value isn't cached in a register by the
                 * compiler and used as a stale value after we've observed a
                 * subsequent update.
                 */
                pmde = READ_ONCE(*pvmw->pmd);

                if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                        pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                        pmde = *pvmw->pmd;
                        if (likely(pmd_trans_huge(pmde))) {
                                if (pvmw->flags & PVMW_MIGRATION)
                                        return not_found(pvmw);
                                if (pmd_page(pmde) != page)
                                        return not_found(pvmw);
                                return true;
                        }
                        if (!pmd_present(pmde)) {
                                swp_entry_t entry;

                                if (!thp_migration_supported() ||
                                    !(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                entry = pmd_to_swp_entry(pmde);
                                if (!is_migration_entry(entry) ||
                                    pfn_swap_entry_to_page(entry) != page)
                                        return not_found(pvmw);
                                return true;
                        }
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                } else if (!pmd_present(pmde)) {
                        /*
                         * If PVMW_SYNC, take and drop the THP pmd lock so that we
                         * cannot return prematurely, while zap_huge_pmd() has
                         * cleared *pmd but not decremented compound_mapcount().
                         */
                        if ((pvmw->flags & PVMW_SYNC) &&
                            PageTransCompound(page)) {
                                spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

                                spin_unlock(ptl);
                        }
                        step_forward(pvmw, PMD_SIZE);
                        continue;
                }
                if (!map_pte(pvmw))
                        goto next_pte;
this_pte:
                if (check_pte(pvmw))
                        return true;
next_pte:
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross a page table boundary? */
                        if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                pte_unmap(pvmw->pte);
                                pvmw->pte = NULL;
                                goto restart;
                        }
                        pvmw->pte++;
                        if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
                                pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                                spin_lock(pvmw->ptl);
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
                goto this_pte;
        } while (pvmw->address < end);

        return false;
}

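/*
 * Caller-side sketch of the loop described above (illustrative only;
 * handle_one_mapping() and stop_early are hypothetical stand-ins, not
 * kernel symbols):
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		handle_one_mapping(&pvmw);
 *		if (stop_early) {
 *			page_vma_mapped_walk_done(&pvmw);
 *			break;
 *		}
 *	}
 *
 * Each successful iteration returns with pvmw.ptl held and pvmw.address
 * pointing at the mapping that was found, so a PTE-mapped THP yields one
 * iteration per mapped subpage; pvmw.pte being NULL inside the loop means
 * a PMD-mapped THP was found instead.
 */
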
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };

        pvmw.address = vma_address(page, vma);
        if (pvmw.address == -EFAULT)
                return 0;
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}

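/*
 * Example (illustrative only): callers that only need a yes/no answer, such
 * as memory-failure handling, can use page_mapped_in_vma() rather than
 * driving the walk themselves:
 *
 *	if (page_mapped_in_vma(page, vma))
 *		...at least one mapping of page exists in vma...
 */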