// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

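/*
 * struct hmm_vma_walk - state shared by the page table walk callbacks
 * @range: the hmm_range whose hmm_pfns[] output array is being filled
 * @last: last address processed, used to restart the walk after -EBUSY
 */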
struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

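/*
 * Fault requirements returned by hmm_pte_need_fault() and
 * hmm_range_need_fault():
 * HMM_NEED_FAULT: a CPU page fault is required to make the entry valid.
 * HMM_NEED_WRITE_FAULT: the fault must also request write access.
 */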
enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

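/*
 * hmm_pfns_fill() - fill the output array for addr..end with @cpu_flags,
 * e.g. HMM_PFN_ERROR for unrecoverable entries or 0 for holes.
 */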
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used in
	 * two ways: in the first, the HMM user coalesces multiple page faults
	 * into one request and sets the flags per pfn for those faults; in
	 * the second, the HMM user wants to pre-fault a range with specific
	 * flags. For the latter it would be a waste to have the user pre-fill
	 * the pfn array with a default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Do we need to fault for write access? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If the CPU page table entry is not valid then we need to fault. */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}
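
/*
 * Example (illustrative only, not part of this file): the two ways the
 * default_flags/pfn_flags_mask pair described above is typically used.
 *
 * 1) Pre-fault a whole range with uniform flags, without pre-filling the
 *    pfn array:
 *
 *	range.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *	range.pfn_flags_mask = 0;
 *
 * 2) Request faults per page: the caller pre-fills hmm_pfns[] with
 *    HMM_PFN_REQ_FAULT (and optionally HMM_PFN_REQ_WRITE) only for the
 *    entries it wants faulted:
 *
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 */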

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool hmm_is_device_private_entry(struct hmm_range *range,
					       swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			cpu_flags = HMM_PFN_VALID;
			if (is_write_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = device_private_entry_to_pfn(entry) |
					cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd lock here: even if some other thread
		 * is splitting the huge pmd, we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. none, migration,
	 * huge or transparent huge. At this point the entry is either a valid
	 * pmd pointing to a pte directory or a bad pmd that will not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access it.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. without causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
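
/*
 * Example usage (an illustrative sketch only, not part of this file; the
 * driver_* names and the device page table lock are hypothetical). A driver
 * typically pairs hmm_range_fault() with an mmu interval notifier and retries
 * whenever the range is invalidated:
 *
 *	int driver_populate_range(struct mmu_interval_notifier *notifier,
 *				  unsigned long start, unsigned long end,
 *				  unsigned long *pfns)
 *	{
 *		struct hmm_range range = {
 *			.notifier = notifier,
 *			.start = start,
 *			.end = end,
 *			.hmm_pfns = pfns,
 *			.default_flags = HMM_PFN_REQ_FAULT,
 *		};
 *		struct mm_struct *mm = notifier->mm;
 *		int ret;
 *
 *		if (!mmget_not_zero(mm))
 *			return -EFAULT;
 *	again:
 *		range.notifier_seq = mmu_interval_read_begin(notifier);
 *		mmap_read_lock(mm);
 *		ret = hmm_range_fault(&range);
 *		mmap_read_unlock(mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				goto again;
 *			goto out;
 *		}
 *
 *		driver_lock_device_pagetable();
 *		if (mmu_interval_read_retry(notifier, range.notifier_seq)) {
 *			driver_unlock_device_pagetable();
 *			goto again;
 *		}
 *		// program the device page table from range.hmm_pfns[]
 *		driver_unlock_device_pagetable();
 *	out:
 *		mmput(mm);
 *		return ret;
 *	}
 */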