mm/page_vma_mapped.c
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

/*
 * Map the PTE for pvmw->address and take its page table lock.  Without
 * PVMW_SYNC the entry is peeked at first, so obviously uninteresting PTEs
 * are skipped without taking the lock; in that case false is returned with
 * the PTE still mapped but unlocked.
 */
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

/*
 * Return true if the PTE that pvmw currently points at maps (a subpage of)
 * pvmw->page.  For PVMW_MIGRATION walks, look for a migration entry pointing
 * into the page instead of a present mapping.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        if (pvmw->flags & PVMW_MIGRATION) {
#ifdef CONFIG_MIGRATION
                swp_entry_t entry;
                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_migration_entry(entry))
                        return false;
                if (migration_entry_to_page(entry) - pvmw->page >=
                                hpage_nr_pages(pvmw->page)) {
                        return false;
                }
                if (migration_entry_to_page(entry) < pvmw->page)
                        return false;
#else
                WARN_ON_ONCE(1);
#endif
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                /* THP can be referenced by any subpage */
                if (pte_page(*pvmw->pte) - pvmw->page >=
                                hpage_nr_pages(pvmw->page)) {
                        return false;
                }
                if (pte_page(*pvmw->pte) < pvmw->page)
                        return false;
        }

        return true;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually THP). For PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 *
 * A minimal caller sketch follows the function body below.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (pvmw->pte)
                goto next_pte;

        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address,
                                PAGE_SIZE << compound_order(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }
restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
        p4d = p4d_offset(pgd, pvmw->address);
        if (!p4d_present(*p4d))
                return false;
        pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
        /*
         * Make sure the pmd value isn't cached in a register by the
         * compiler and used as a stale value after we've observed a
         * subsequent update.
         */
        pmde = READ_ONCE(*pvmw->pmd);
        if (pmd_trans_huge(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                if (!pmd_present(*pvmw->pmd))
                        return not_found(pvmw);
                if (likely(pmd_trans_huge(*pvmw->pmd))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
                        if (pmd_page(*pvmw->pmd) != page)
                                return not_found(pvmw);
                        return true;
                } else {
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                }
        } else if (!pmd_present(pmde)) {
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
                if (check_pte(pvmw))
                        return true;
next_pte:
                /* Seek to next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= pvmw->vma->vm_end ||
                            pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
                                        hpage_nr_pages(pvmw->page) * PAGE_SIZE)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
                                pte_unmap(pvmw->pte);
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                goto restart;
                        } else {
                                pvmw->pte++;
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
        }
}
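
/*
 * Usage sketch (illustrative only, not part of the original file): a minimal
 * caller loop over every mapping of @page within one VMA, in the style of the
 * rmap walkers.  The function, its name and the mapcount counter are
 * assumptions made up for this example; only the page_vma_mapped_walk()
 * calling convention is taken from the code above.
 */
static int __maybe_unused example_count_mappings(struct page *page,
                                                 struct vm_area_struct *vma,
                                                 unsigned long address)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = address,
        };
        int mapcount = 0;

        while (page_vma_mapped_walk(&pvmw)) {
                /*
                 * Here either pvmw.pte points at a PTE mapping the page, or
                 * pvmw.pte is NULL and pvmw.pmd points at a PMD mapping it;
                 * pvmw.ptl is held in both cases.
                 */
                mapcount++;
                /*
                 * To stop early, call page_vma_mapped_walk_done(&pvmw) and
                 * break; when the loop ends normally the walk has already
                 * dropped the lock and unmapped the PTE.
                 */
        }
        return mapcount;
}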

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };
        unsigned long start, end;

        start = __vma_address(page, vma);
        end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

        if (unlikely(end < vma->vm_start || start >= vma->vm_end))
                return 0;
        pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
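
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that filters a VMA list down to those that actually map @page, e.g. before
 * reporting them.  The function, its name and the report callback are
 * assumptions made up for this example; the caller is assumed to hold
 * mmap_sem for read so that walking vm_next is safe.
 */
static void __maybe_unused example_report_mapping_vmas(struct page *page,
                                struct vm_area_struct *head,
                                void (*report)(struct vm_area_struct *))
{
        struct vm_area_struct *vma;

        for (vma = head; vma; vma = vma->vm_next) {
                if (page_mapped_in_vma(page, vma))
                        report(vma);
        }
}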