// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from a pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Return: valid device entry for the pfn
 */
static uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
					  unsigned long pfn)
{
	return (pfn << range->pfn_shift) | range->flags[HMM_PFN_VALID];
}

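/*
 * Illustrative note (not part of the original source): the encoding above
 * keeps all flag bits below range->pfn_shift, so a caller that gets one of
 * these entries back in range->pfns[] can recover the pfn with a plain
 * shift. A minimal sketch, where "entry" is a hypothetical value taken from
 * range->pfns[] for a present page:
 *
 *	unsigned long pfn = entry >> range->pfn_shift;
 *	struct page *page = pfn_to_page(pfn);
 *
 * The matching decode helpers in include/linux/hmm.h express the same
 * relationship; the open-coded form is shown here only to mirror the encode
 * step above.
 */
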
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
		struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (!vma)
		goto out_error;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE, i++)
		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
			goto out_error;

	return -EBUSY;

out_error:
	pfns[i] = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       uint64_t pfns, uint64_t cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used two ways. The first one is where the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * those faults. The second one is where the HMM user wants to
	 * pre-fault a range with specific flags. For the latter it is a
	 * waste to have the user pre-fill the pfn array with a default
	 * flags value. (An illustrative sketch of both modes follows this
	 * function.)
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return 0;

	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE]))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If the CPU page table is not valid then we need to fault. */
	if (!(cpu_flags & range->flags[HMM_PFN_VALID]))
		return HMM_NEED_FAULT;
	return 0;
}

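/*
 * Illustrative note (not part of the original source): the two usage modes
 * described above boil down to how the caller fills default_flags and
 * pfn_flags_mask before calling hmm_range_fault(). A minimal sketch,
 * assuming the caller has already populated range->flags[]:
 *
 *	// Pre-fault the whole range for write; ignore per-pfn requests:
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 *
 *	// Or: honour per-page requests already encoded in range->pfns[]:
 *	range->default_flags = 0;
 *	range->pfn_flags_mask = -1ULL;
 *
 * Either way, hmm_pte_need_fault() sees the combined request through the
 * (pfns & pfn_flags_mask) | default_flags computation above.
 */
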
static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const uint64_t *pfns, unsigned long npages,
		     uint64_t cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      range->flags[HMM_PFN_VALID]))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |=
			hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	required_fault = hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	hmm_vma_walk->last = addr;
	return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool hmm_is_device_private_entry(struct hmm_range *range,
		swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	if (pte_none(pte)) {
		required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
		if (required_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			*pfn = hmm_device_entry_from_pfn(range,
					device_private_entry_to_pfn(entry));
			*pfn |= range->flags[HMM_PFN_VALID];
			if (is_write_device_private_entry(entry))
				*pfn |= range->flags[HMM_PFN_WRITE];
			return 0;
		}

		required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
		if (!required_fault)
			return 0;

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*pfn = range->values[HMM_PFN_SPECIAL];
		return 0;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here: even if some other thread
		 * is splitting the huge pmd we will get that event through the
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		uint64_t *pfns, cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	unsigned int required_fault;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or that map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	hmm_vma_walk->last = end;

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access it.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);