// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"
static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}
static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}
static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct page *page;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
			if (pmd_trans_unstable(pmdp))
				return migrate_vma_collect_skip(start, end,
								walk);
		} else {
			int ret;

			get_page(page);
			spin_unlock(ptl);
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
			if (pmd_none(*pmdp))
				return migrate_vma_collect_hole(start, end, -1,
								walk);
		}
	}

	if (unlikely(pmd_bad(*pmdp)))
		return migrate_vma_collect_skip(start, end, walk);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = *ptep;

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device page special
			 * page table entry. Other special swap entries are not
			 * migratable, and we ignore regular swapped page.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn) &&
			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			if (page && !is_zone_device_page(page) &&
			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			else if (page && is_device_coherent_page(page) &&
			    (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
			     page->pgmap->owner != migrate->pgmap_owner))
				goto next;
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the page we pin it and that blocks
		 * any kind of migration. Side effect is that it "freezes" the
		 * pte.
		 *
		 * We drop this reference after isolating the page from the lru
		 * for non device page (device page are not on the lru and thus
		 * can't be dropped from it).
		 */
		get_page(page);

		/*
		 * Optimize for the common case where page is only mapped once
		 * in one process. If we can lock the page, then we can safely
		 * set up a special migration page table entry now.
		 */
		if (trylock_page(page)) {
			bool anon_exclusive;
			pte_t swp_pte;

			anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
			if (anon_exclusive) {
				flush_cache_page(vma, addr, pte_pfn(*ptep));
				ptep_clear_flush(vma, addr, ptep);

				if (page_try_share_anon_rmap(page)) {
					set_pte_at(mm, addr, ptep, pte);
					unlock_page(page);
					put_page(page);
					mpfn = 0;
					goto next;
				}
			} else {
				ptep_get_and_clear(mm, addr, ptep);
			}

			migrate->cpages++;

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like regular unmap: we remove the rmap and
			 * drop page refcount. Page won't be freed, as we took
			 * a reference just above.
			 */
			page_remove_rmap(page, vma, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		} else {
			put_page(page);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	return 0;
}
static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry		= migrate_vma_collect_pmd,
	.pte_hole		= migrate_vma_collect_hole,
};
/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
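/*
 * For reference, each src array entry filled in by this walk encodes the page
 * frame number together with per-page flags. An illustrative sketch only: a
 * migratable, writable system page ends up recorded as
 *
 *	migrate->src[i] = migrate_pfn(page_to_pfn(page)) |
 *			  MIGRATE_PFN_MIGRATE | MIGRATE_PFN_WRITE;
 *
 * and the struct page can later be recovered with
 * migrate_pfn_to_page(migrate->src[i]).
 */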
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}
/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page)
{
	/*
	 * One extra ref because caller holds an extra reference, either from
	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
	 * a device page.
	 */
	int extra = 1;

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page))
		extra++;

	/* For file backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}
/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if it has been pinned. Pinned pages
 * are restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy contents of original page over to new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i, restore = 0;
	bool allow_drain = true;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page)
			continue;

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's pagevec */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (isolate_lru_page(page)) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				migrate->cpages--;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}

		folio = page_folio(page);
		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (page_mapped(page) || !migrate_vma_check_page(page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			migrate->cpages--;
			restore++;
			continue;
		}
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, false);

		migrate->src[i] = 0;
		folio_unlock(folio);
		folio_put(folio);
		restore--;
	}
}
/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a range of virtual addresses by collecting all the pages
 * backing each virtual address in the range, saving them inside the src array.
 * Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Pages that are pinned are restored by
 * remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
 * lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source
 * array entry, thus allowing the caller to allocate device memory for those
 * unbacked virtual addresses. For this the caller simply has to allocate
 * device memory and properly set the destination entry like for regular
 * migration. Note that this can still fail, and thus inside the device driver
 * you must check whether the migration was successful for those entries after
 * calling migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
 * flag set, then migrate_vma_pages() migrates struct page information from the
 * source struct page to the destination struct page. If it fails to migrate
 * the struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in
 * the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set and the
 * dst array entry with the MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns, the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source pages are still locked, and the mmap_lock is
 * held in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if
 * it chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages, or otherwise restore the CPU page table to
 * point to the original source pages.
 */
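/*
 * A minimal, illustrative driver-side sketch of the sequence described above,
 * migrating a single system page to device memory. The destination allocation
 * and device copy helpers (my_alloc_device_page(), my_copy_to_device()) and
 * the drvdata owner cookie are hypothetical stand-ins for driver-specific
 * code, not part of this API:
 *
 *	unsigned long src_pfn = 0, dst_pfn = 0;
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= addr,
 *		.end		= addr + PAGE_SIZE,
 *		.src		= &src_pfn,
 *		.dst		= &dst_pfn,
 *		.pgmap_owner	= drvdata,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *	struct page *dpage;
 *	int ret;
 *
 *	ret = migrate_vma_setup(&args);
 *	if (ret)
 *		return ret;
 *
 *	if (src_pfn & MIGRATE_PFN_MIGRATE) {
 *		dpage = my_alloc_device_page();
 *		lock_page(dpage);
 *		my_copy_to_device(dpage, migrate_pfn_to_page(src_pfn));
 *		dst_pfn = migrate_pfn(page_to_pfn(dpage));
 *	}
 *
 *	migrate_vma_pages(&args);
 *	// src_pfn & MIGRATE_PFN_MIGRATE now tells whether migration succeeded;
 *	// the driver may update its device page tables here.
 *	migrate_vma_finalize(&args);
 */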
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);
/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;

	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;

	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have mmap_read_lock(mm).
	 */
	if (pte_alloc(mm, pmdp))
		goto abort;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmdp)))
		goto abort;

	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	if (is_device_private_page(page)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		if (is_zone_device_page(page) &&
		    !is_device_coherent_page(page)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry));
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(*ptep)) {
		unsigned long pfn = pte_pfn(*ptep);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(*ptep))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr);
	if (!is_zone_device_page(page))
		lru_cache_add_inactive_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(*ptep));
		ptep_clear_flush_notify(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}
/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to the
 * destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	struct mmu_notifier_range range;
	unsigned long addr, i;
	bool notified = false;

	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			/*
			 * The only time there is no vma is when called from
			 * migrate_device_coherent_page(). However this isn't
			 * called if the page could not be unmapped.
			 */
			VM_BUG_ON(!migrate->vma);
			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
				continue;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&migrate->src[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_device_private_page(newpage) ||
		    is_device_coherent_page(newpage)) {
			/*
			 * For now only support anonymous memory migrating to
			 * device private or coherent memory.
			 */
			if (mapping) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				continue;
			}
		} else if (is_zone_device_page(newpage)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		r = migrate_folio(mapping, page_folio(newpage),
				page_folio(page), MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
	 * did already call it.
	 */
	if (notified)
		mmu_notifier_invalidate_range_only_end(&range);
}
EXPORT_SYMBOL(migrate_vma_pages);
/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the
 * extra refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst, *src;
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		src = page_folio(page);
		dst = page_folio(newpage);
		remove_migration_ptes(src, dst, false);
		folio_unlock(src);

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_vma_finalize);
/*
 * Migrate a device coherent page back to normal memory. The caller should have
 * a reference on page which will be copied to the new page if migration is
 * successful or dropped on failure.
 */
int migrate_device_coherent_page(struct page *page)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma args;
	struct page *dpage;

	WARN_ON_ONCE(PageCompound(page));

	lock_page(page);
	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
	args.src = &src_pfn;
	args.dst = &dst_pfn;
	args.cpages = 1;
	args.npages = 1;
	args.vma = NULL;

	/*
	 * We don't have a VMA and don't need to walk the page tables to find
	 * the source page. So call migrate_vma_unmap() directly to unmap the
	 * page as migrate_vma_setup() will fail if args.vma == NULL.
	 */
	migrate_vma_unmap(&args);
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
	if (dpage) {
		lock_page(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_vma_pages(&args);
	if (src_pfn & MIGRATE_PFN_MIGRATE)
		copy_highpage(dpage, page);
	migrate_vma_finalize(&args);

	if (src_pfn & MIGRATE_PFN_MIGRATE)
		return 0;
	return -EBUSY;
}