#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/*
 * Walk the pte entries covering [addr, end) under one pmd, invoking
 * ->pte_entry() once per page.
 */
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

/*
 * Walk the pmd entries covering [addr, end) under one pud. Huge pmds are
 * either handled by ->pmd_entry() or split before descending to pte level.
 */
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_pmd(walk->vma, pmd, addr);
		if (pmd_trans_unstable(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
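
/*
 * A minimal sketch of a ->pmd_entry() handler, illustrating the rule noted
 * above: walk_pmd_range() calls it before any splitting, so the handler must
 * cope with pmd_trans_huge() pmds itself. The function name and the counter
 * passed via walk->private are hypothetical.
 */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *thp_count = walk->private;	/* hypothetical counter */
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);

	if (ptl) {
		/* One trans_huge pmd maps the whole [addr, next) range. */
		(*thp_count)++;
		spin_unlock(ptl);
		return 0;
	}
	/* Not huge: returning 0 lets the walk descend to the pte level. */
	return 0;
}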

/*
 * Walk the pud entries covering [addr, end) under one p4d. A huge pud is
 * handed to ->pud_entry() under its lock, or split before going deeper.
 */
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		if (walk->pud_entry) {
			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

			if (ptl) {
				err = walk->pud_entry(pud, addr, next, walk);
				spin_unlock(ptl);
				if (err)
					break;
				continue;
			}
		}

		split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	int err = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
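
/*
 * A minimal sketch of a ->pte_hole() callback, as invoked by the walkers
 * above for empty page-table entries. The name and the byte counter passed
 * via walk->private are hypothetical.
 */
static int example_pte_hole(unsigned long addr, unsigned long next,
			    struct mm_walk *walk)
{
	unsigned long *hole_bytes = walk->private;	/* hypothetical counter */

	*hole_bytes += next - addr;
	return 0;	/* a nonzero return would abort the walk */
}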

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

/*
 * Walk a hugetlb vma in huge-page-sized steps, invoking ->hugetlb_entry()
 * once per huge pte, or ->pte_hole() where none is present.
 */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (walk->pte_hole)
			err = walk->pte_hole(addr, next, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
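
/*
 * A minimal sketch of a ->hugetlb_entry() callback. It is called once per
 * huge page; @hmask masks off the offset within the huge page. The name and
 * the counter passed via walk->private are hypothetical.
 */
static int example_hugetlb_entry(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long next,
				 struct mm_walk *walk)
{
	unsigned long *mapped = walk->private;	/* hypothetical counter */

	if (!huge_pte_none(huge_ptep_get(pte)))
		(*mapped)++;
	return 0;
}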

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it. Return 0 to walk over the current vma, and return 1 to
 * skip it. A negative return value means an error, and we abort the
 * current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	if (walk->test_walk)
		return walk->test_walk(start, end, walk);

	/*
	 * A vma with VM_PFNMAP has no valid struct pages behind its range,
	 * so we don't walk over it as we do for normal vmas. However, some
	 * callers are interested in handling hole ranges and don't want any
	 * address range to be silently ignored. Such users define their
	 * ->pte_hole() callbacks, so let's delegate vma(VM_PFNMAP) to them.
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;
		if (walk->pte_hole)
			err = walk->pte_hole(start, end, walk);
		return err ? err : 1;
	}
	return 0;
}
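
/*
 * A minimal sketch of a ->test_walk() callback following the convention
 * above: return 1 to skip the vma, 0 to walk it, negative to abort. The
 * name and the flag choice are hypothetical.
 */
static int example_test_walk(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	/* Skip I/O and pfn mappings, but keep walking the rest. */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return 1;
	return 0;
}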

static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;

	if (vma && is_vm_hugetlb_page(vma)) {
		if (walk->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @start: start address of the virtual address range
 * @end: end address of the virtual address range
 * @walk: mm_walk structure defining the callbacks and the target mm
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as follows:
 *  - 0  : handled the current entry successfully; if the end address has
 *         not been reached yet, continue the walk.
 *  - >0 : handled the current entry successfully, and return to the caller
 *         with the caller-specific value.
 *  - <0 : failed to handle the current entry; return to the caller with
 *         the error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from the callbacks. If you want to pass some
 * caller-specific data to the callbacks, @walk->private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 *   and/or access vma fields.
 */
int walk_page_range(unsigned long start, unsigned long end,
		    struct mm_walk *walk)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;

	if (start >= end)
		return -EINVAL;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

	vma = find_vma(walk->mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk->vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vma */
			walk->vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk->vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk->vma || walk->pte_hole)
			err = __walk_page_range(start, next, walk);
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
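
/*
 * A minimal usage sketch of the API documented above. It counts the present
 * ptes in [start, end) of @mm; both function names are hypothetical, and the
 * mmap_sem locking follows the rule stated in the kerneldoc.
 */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long count_present_pages(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_present_pte,
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &walk);	/* errors ignored in this sketch */
	up_read(&mm->mmap_sem);
	return count;
}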

/**
 * walk_page_vma - walk page table within a single vma
 * @vma: vma to walk
 * @walk: mm_walk structure defining the callbacks and the target mm
 *
 * Like walk_page_range(), but restricted to [@vma->vm_start, @vma->vm_end).
 * The same callback return-value convention and mmap_sem locking rule apply.
 */
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
	int err;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
	VM_BUG_ON(!vma);
	walk->vma = vma;
	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}