/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @ranges: list of range being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	spinlock_t		lock;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};
/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have pre-allocated a new hmm struct.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode, i.e.
	 * on registration of the first mirror through hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}
void hmm_mm_destroy(struct mm_struct *mm)
{
	kfree(mm->hmm);
}
static int hmm_invalidate_range(struct hmm *hmm, bool device,
				const struct hmm_update *update)
{
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		unsigned long addr, idx, npages;

		if (update->end < range->start || update->start >= range->end)
			continue;

		range->valid = false;
		addr = max(update->start, range->start);
		idx = (addr - range->start) >> PAGE_SHIFT;
		npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
	}
	spin_unlock(&hmm->lock);

	if (!device)
		return 0;

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
		if (!update->blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			return -EAGAIN;
		}
	}
	up_read(&hmm->mirrors_sem);

	return 0;
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm_mirror *mirror;
	struct hmm *hmm = mm->hmm;

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so callback can wait on any pending
			 * work that might itself trigger mmu_notifier callback
			 * and thus would deadlock with us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *range)
{
	struct hmm_update update;
	struct hmm *hmm = range->mm->hmm;

	update.start = range->start;
	update.end = range->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = range->blockable;
	return hmm_invalidate_range(hmm, true, &update);
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *range)
{
	struct hmm_update update;
	struct hmm *hmm = range->mm->hmm;

	update.start = range->start;
	update.end = range->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = true;
	hmm_invalidate_range(hmm, false, &update);
}
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};
/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

again:
	mirror->hmm = hmm_register(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	if (mirror->hmm->mm == NULL) {
		/*
		 * A racing hmm_mirror_unregister() is about to destroy the hmm
		 * struct. Try again to allocate a new one.
		 */
		up_write(&mirror->hmm->mirrors_sem);
		mirror->hmm = NULL;
		goto again;
	}
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
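
/*
 * Example (illustrative sketch only, not part of the upstream file): a device
 * driver typically embeds a struct hmm_mirror in its own per-process state
 * and registers it while holding mmap_sem in write mode. The "dmirror" names
 * and the dmirror_invalidate() helper below are hypothetical stand-ins for
 * whatever the driver actually uses to update its device page table.
 *
 *   struct dmirror {
 *           struct hmm_mirror mirror;
 *           // device private page table state ...
 *   };
 *
 *   static int dmirror_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                          const struct hmm_update *update)
 *   {
 *           struct dmirror *dmirror;
 *
 *           dmirror = container_of(mirror, struct dmirror, mirror);
 *           // Must not block when update->blockable is false.
 *           return dmirror_invalidate(dmirror, update->start, update->end,
 *                                     update->blockable);
 *   }
 *
 *   static const struct hmm_mirror_ops dmirror_ops = {
 *           .sync_cpu_device_pagetables = dmirror_sync_cpu_device_pagetables,
 *   };
 *
 *   // At bind time:
 *   //   down_write(&mm->mmap_sem);
 *   //   dmirror->mirror.ops = &dmirror_ops;
 *   //   ret = hmm_mirror_register(&dmirror->mirror, mm);
 *   //   up_write(&mm->mmap_sem);
 */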
/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	bool should_unregister = false;
	struct mm_struct *mm;
	struct hmm *hmm;

	if (mirror->hmm == NULL)
		return;

	hmm = mirror->hmm;
	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	should_unregister = list_empty(&hmm->mirrors);
	mirror->hmm = NULL;
	mm = hmm->mm;
	hmm->mm = NULL;
	up_write(&hmm->mirrors_sem);

	if (!should_unregister || mm == NULL)
		return;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};
static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EAGAIN;
}
static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}
/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return (fault || write_fault) ? -EAGAIN : 0;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	*fault = *write_fault = false;
	if (!hmm_vma_walk->fault)
		return;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*fault) || (*write_fault))
			return;
	}
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (pte_none(pte)) {
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EAGAIN;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EAGAIN;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}
static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}
/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See hmm_vma_range_done() for further
 * information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct hmm_range *range)
{
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm)
		return -ENOMEM;
	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
	    vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access, either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(range->start, range->end, &mm_walk);
	return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);
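
/*
 * Example (illustrative sketch only, not part of the upstream file): taking a
 * snapshot of one vma. The pfns array sizing and the driver_hmm_flags and
 * driver_hmm_values tables are hypothetical driver-provided objects; see
 * struct hmm_range in include/linux/hmm.h for the fields a driver must fill.
 *
 *   struct hmm_range range;
 *
 *   range.vma = vma;
 *   range.start = start;               // page aligned, within the vma
 *   range.end = end;                   // page aligned, within the vma
 *   range.pfns = pfns;                 // (end - start) >> PAGE_SHIFT entries
 *   range.flags = driver_hmm_flags;    // driver encoding of HMM_PFN_* flags
 *   range.values = driver_hmm_values;  // driver encoding of HMM_PFN_* values
 *
 *   down_read(&mm->mmap_sem);
 *   ret = hmm_vma_get_pfns(&range);
 *   if (!ret) {
 *           // ... consume range.pfns ...
 *           hmm_vma_range_done(&range); // mandatory on success
 *   }
 *   up_read(&mm->mmap_sem);
 */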
/*
 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
 * using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that device driver must still implement general CPU page table update
 * tracking either by using hmm_mirror (see hmm_mirror_register()) or by using
 * the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this :
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	struct hmm *hmm;

	if (range->end <= range->start) {
		BUG();
		return false;
	}

	hmm = hmm_register(range->vma->vm_mm);
	if (!hmm) {
		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
		return false;
	}

	spin_lock(&hmm->lock);
	list_del_rcu(&range->list);
	spin_unlock(&hmm->lock);

	return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);
/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem has been dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(range, write, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_monitor_end()
 *     goto retry;
 *   case 0:
 *     break;
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem)
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
	struct vm_area_struct *vma = range->vma;
	unsigned long start = range->start;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;
	int ret;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm) {
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -ENOMEM;
	}
	/* Caller must have registered a mirror using hmm_mirror_register() */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
	    vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access, either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = true;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	do {
		ret = walk_page_range(start, range->end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

	if (ret) {
		unsigned long i;

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
			       range->end);
		hmm_vma_range_done(range);
	}
	return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}
static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}
static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}
static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}
static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}
/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by hmm_devmem_ops struct.
 *
 * Device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
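
/*
 * Example (illustrative sketch only, not part of the upstream file): a driver
 * hotplugging device private memory at probe time. The my_devmem_* callbacks
 * and SIZE are hypothetical names; the real fault callback must migrate data
 * back to system memory on CPU fault and the free callback must reclaim the
 * device page.
 *
 *   static const struct hmm_devmem_ops my_devmem_ops = {
 *           .free = my_devmem_free,    // called when a device page is freed
 *           .fault = my_devmem_fault,  // called on CPU fault to a device page
 *   };
 *
 *   devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, SIZE);
 *   if (IS_ERR(devmem))
 *           return PTR_ERR(devmem);
 *   // device pages span devmem->pfn_first .. devmem->pfn_last - 1
 */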
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
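
/*
 * Example (illustrative sketch only, not part of the upstream file): unlike
 * hmm_devmem_add(), the caller already owns a physical resource marked
 * IORES_DESC_DEVICE_PUBLIC_MEMORY and passes it in directly. The res pointer
 * below is assumed to have been obtained from platform or bus code, and the
 * ops table is the same hypothetical one sketched above.
 *
 *   devmem = hmm_devmem_add_resource(&my_devmem_ops, &pdev->dev, res);
 *   if (IS_ERR(devmem))
 *           return PTR_ERR(devmem);
 */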
/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;
static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}
struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);
void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
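
/*
 * Example (illustrative sketch only, not part of the upstream file): drivers
 * that want one fake struct device per HMM user pair hmm_device_new() with
 * hmm_device_put(). The drvdata argument is driver defined.
 *
 *   hmm_device = hmm_device_new(driver_private_data);
 *   if (IS_ERR(hmm_device))
 *           return PTR_ERR(hmm_device);
 *   // ... use &hmm_device->device, e.g. as the device for hmm_devmem_add() ...
 *   hmm_device_put(hmm_device);
 */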
static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */