// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm;

	lockdep_assert_held_write(&mm->mmap_sem);

	/* Abuse the page_table_lock to also protect mm->hmm. */
	spin_lock(&mm->page_table_lock);
	hmm = mm->hmm;
	if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
		goto out_unlock;
	spin_unlock(&mm->page_table_lock);

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;

	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->ranges_lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->mm = mm;

	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	mmgrab(hmm->mm);

	/*
	 * We hold the exclusive mmap_sem here so we know that mm->hmm is
	 * still NULL or 0 kref, and is safe to update.
	 */
	spin_lock(&mm->page_table_lock);
	mm->hmm = hmm;

out_unlock:
	spin_unlock(&mm->page_table_lock);
	return hmm;
}
static void hmm_free_rcu(struct rcu_head *rcu)
{
	struct hmm *hmm = container_of(rcu, struct hmm, rcu);

	mmdrop(hmm->mm);
	kfree(hmm);
}
static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);

	spin_lock(&hmm->mm->page_table_lock);
	if (hmm->mm->hmm == hmm)
		hmm->mm->hmm = NULL;
	spin_unlock(&hmm->mm->page_table_lock);

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
}
static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/* Bail out if hmm is in the process of being freed */
	if (!kref_get_unless_zero(&hmm->kref))
		return;

	/*
	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
	 * prevented as long as a range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);

	hmm_put(hmm);
}
static void notifiers_decrement(struct hmm *hmm)
{
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	unsigned long flags;
	int ret = 0;

	if (!kref_get_unless_zero(&hmm->kref))
		return 0;

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = mmu_notifier_range_blockable(nrange);

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (rc) {
			if (WARN_ON(update.blockable || rc != -EAGAIN))
				continue;
			ret = -EAGAIN;
			break;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	if (ret)
		notifiers_decrement(hmm);
	hmm_put(hmm);
	return ret;
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	if (!kref_get_unless_zero(&hmm->kref))
		return;

	notifiers_decrement(hmm);
	hmm_put(hmm);
}
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};
/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
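
/*
 * Example (illustrative sketch only, not taken from a real driver; the
 * my_* names and the ops body are assumptions): a driver embeds a
 * struct hmm_mirror, provides at least sync_cpu_device_pagetables(),
 * and registers it while holding the mmap_sem for write.
 *
 *	static int my_sync(struct hmm_mirror *mirror,
 *			   const struct hmm_update *update)
 *	{
 *		// invalidate device mappings for [update->start, update->end)
 *		return 0;
 *	}
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.sync_cpu_device_pagetables = my_sync,
 *	};
 *
 *	my_mirror.ops = &my_mirror_ops;
 *	down_write(&mm->mmap_sem);
 *	ret = hmm_mirror_register(&my_mirror, mm);
 *	up_write(&mm->mmap_sem);
 */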
/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};
static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}
static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}
/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * So we not only consider the individual per page request we also
	 * consider the default flags requested for the range. The API can
	 * be used in 2 fashions. The first one where the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * those faults. The second one where the HMM user wants to pre-
	 * fault a range with specific flags. For the latter one it is a
	 * waste to have the user pre-fill the pfn array with a default
	 * flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code! */
	return -EINVAL;
#endif
}
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read pmd value and check again that it is a
		 * transparent huge or device mapping one and compute
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage get_dev_pagemap() optimization which
		 * will not re-take a reference on a pgmap if we already have
		 * one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}
static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}
/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror struct against which the range is registered
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table, see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct hmm_mirror *mirror,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);
	struct hmm *hmm = mirror->hmm;
	unsigned long flags;

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	spin_lock_irqsave(&hmm->ranges_lock, flags);

	range->hmm = hmm;
	kref_get(&hmm->kref);
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
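
/*
 * Example (illustrative sketch; my_mirror, my_flags and my_values are
 * assumptions): registering a range and waiting for it to become valid
 * before snapshotting or faulting it.
 *
 *	range.pfns = pfns;		// caller-allocated array
 *	range.flags = my_flags;		// driver's HMM_PFN_* flag encoding
 *	range.values = my_values;
 *	ret = hmm_range_register(&range, &my_mirror, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	if (!hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT)) {
 *		hmm_range_unregister(&range);
 *		return -EBUSY;
 *	}
 */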
/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	list_del_init(&range->list);
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	/* Drop reference taken by hmm_range_register() */
	mmput(hmm->mm);
	hmm_put(hmm);

	/*
	 * The range is now invalid and the ref on the hmm is dropped, so
	 * poison the pointer. Leave other fields in place, for the caller's
	 * use.
	 */
	range->valid = false;
	memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);
/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EBUSY if you need to retry, -EFAULT invalid (i.e. either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See include/linux/hmm.h for an
 * example of use.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	lockdep_assert_held(&hmm->mm->mmap_sem);
	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
				    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
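
/*
 * Example (illustrative sketch; the again label and surrounding driver code
 * are assumptions): hmm_range_snapshot() runs under the mmap_sem read lock
 * and is simply retried when a concurrent invalidation made the range
 * invalid.
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	up_read(&mm->mmap_sem);
 *	if (ret == -EBUSY)
 *		goto again;	// re-wait for the range to become valid
 */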
/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Return: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL:	invalid arguments or mm or virtual address is in an
 *			invalid vma (for instance device file vma).
 *           -ENOMEM:	Out of memory.
 *           -EPERM:	Invalid permission (for instance asking for write and
 *			range is read only).
 *           -EAGAIN:	If you need to retry and mmap_sem was dropped. This can
 *			only happen if the block argument is false.
 *           -EBUSY:	If the range is being invalidated and you should wait
 *			for invalidation to finish.
 *           -EFAULT:	Invalid (i.e. either no valid vma or it is illegal to
 *			access that range), number of valid pages in
 *			range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	lockdep_assert_held(&hmm->mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
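
/*
 * Example (illustrative sketch of the usual retry pattern; the surrounding
 * driver code and error handling are assumptions): fault a range and retry
 * while concurrent invalidations race with us, then only program the device
 * page table while holding the driver lock that serializes against
 * sync_cpu_device_pagetables() and while range.valid is still true.
 *
 * again:
 *	if (!hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT)) {
 *		hmm_range_unregister(&range);
 *		return -EBUSY;
 *	}
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	if (ret < 0) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EBUSY)
 *			goto again;
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 */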
/*
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device against which to dma map pages
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
 *          dropped and you need to try again, some other error value otherwise
 *
 * Note same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, block);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove a lot of dumb code duplicated across many archs.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
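
/*
 * Example (illustrative sketch; the daddrs sizing and the err label are
 * assumptions): pairing hmm_range_dma_map() with hmm_range_dma_unmap()
 * around device access, with daddrs[] sized like range->pfns[].
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_dma_map(&range, dev, daddrs, true);
 *	up_read(&mm->mmap_sem);
 *	if (ret < 0)
 *		goto err;
 *	// ... device uses daddrs[] ...
 *	hmm_range_dma_unmap(&range, NULL, dev, daddrs, true);
 */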
/*
 * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
 * @range: range being unmapped
 * @vma: the vma against which the range was mapped (optional)
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by the mmu notifier or use the HMM mirror
 * and abide by the sync_cpu_device_pagetables() callback so that it is safe
 * here to call set_page_dirty(). The caller must also take appropriate locks
 * to prevent concurrent mmu notifiers or sync_cpu_device_pagetables() from
 * making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in function description on why it is
			 * safe here to call set_page_dirty()
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_vma_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);