#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

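/*
 * Check that the pmd entry is present and points to a page table rather
 * than a huge page, i.e. that it is safe to descend to the pte level.
 */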
static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
{
	pmd_t pmde;
	/*
	 * Make sure we don't re-load pmd between present and !trans_huge check.
	 * We need a consistent view.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	return pmd_present(pmde) && !pmd_trans_huge(pmde);
}

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

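/*
 * Map the pte at the walk's current address and take its lock.  Unless
 * PVMW_SYNC is set, bail out early on entries the caller is not looking
 * for: a migration walk (PVMW_MIGRATION) wants swap-style entries, any
 * other walk wants present ptes.
 */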
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	if (pvmw->flags & PVMW_MIGRATION) {
#ifdef CONFIG_MIGRATION
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;
		if (migration_entry_to_page(entry) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
			return false;
		}
		if (migration_entry_to_page(entry) < pvmw->page)
			return false;
#else
		WARN_ON_ONCE(1);
#endif
	} else {
		if (is_swap_pte(*pvmw->pte)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(*pvmw->pte);
			if (is_device_private_entry(entry) &&
			    device_private_entry_to_page(entry) == pvmw->page)
				return true;
		}

		if (!pte_present(*pvmw->pte))
			return false;

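		/*
		 * The pte matches if the page it maps lies within the
		 * compound page, i.e. at or above pvmw->page but less than
		 * hpage_nr_pages() subpages past it.
		 */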
		/* THP can be referenced by any subpage */
		if (pte_page(*pvmw->pte) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
			return false;
		}
		if (pte_page(*pvmw->pte) < pvmw->page)
			return false;
	}

	return true;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 *
 * See the caller sketch after the function body for the typical loop shape.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			} else
				WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else {
		if (!check_pmd(pvmw))
			return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
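	/*
	 * From here the walk iterates ptes: check the current one and, on
	 * mismatch, advance through the remaining ptes of a PTE-mapped THP.
	 * Crossing a PMD boundary drops the pte mapping and lock and
	 * restarts the descent from the page table root.
	 */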
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
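
/*
 * Illustrative sketch, not part of the original file: the typical shape
 * of a page_vma_mapped_walk() caller, per the contract documented above.
 * The helper name page_is_mapped_at() is hypothetical; the block is
 * guarded out so it is never compiled.
 */
#if 0
static bool page_is_mapped_at(struct page *page, struct vm_area_struct *vma,
			      unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * pvmw.pte set: one pte (a small page or one subpage of a
		 * THP).  pvmw.pte NULL with pvmw.pmd set: a PMD-mapped THP.
		 * Stopping early, while pvmw.ptl is still held, requires an
		 * explicit page_vma_mapped_walk_done().
		 */
		page_vma_mapped_walk_done(&pvmw);
		return true;
	}
	/* A false return has already unlocked and unmapped everything. */
	return false;
}
#endif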

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
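
/*
 * Illustrative sketch, not part of the original file: using
 * page_mapped_in_vma() as a filter over candidate VMAs.  The function
 * name and the for_each_candidate_vma() iterator are hypothetical
 * placeholders for whatever rmap iteration the caller already has;
 * guarded out so it is never compiled.
 */
#if 0
static void act_on_mapped_vmas(struct page *page)
{
	struct vm_area_struct *vma;

	for_each_candidate_vma(page, vma) {	/* hypothetical iterator */
		if (!page_mapped_in_vma(page, vma))
			continue;
		/* page really is mapped in vma's page tables: act here */
	}
}
#endif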