/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
        struct hmm *hmm = READ_ONCE(mm->hmm);

        if (hmm && kref_get_unless_zero(&hmm->kref))
                return hmm;

        return NULL;
}
/*
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm, and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
        struct hmm *hmm = mm_get_hmm(mm);
        bool cleanup = false;

        if (hmm)
                return hmm;

        hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
        if (!hmm)
                return NULL;
        init_waitqueue_head(&hmm->wq);
        INIT_LIST_HEAD(&hmm->mirrors);
        init_rwsem(&hmm->mirrors_sem);
        hmm->mmu_notifier.ops = NULL;
        INIT_LIST_HEAD(&hmm->ranges);
        mutex_init(&hmm->lock);
        kref_init(&hmm->kref);
        hmm->notifiers = 0;
        hmm->dead = false;
        hmm->mm = mm;

        spin_lock(&mm->page_table_lock);
        if (!mm->hmm)
                mm->hmm = hmm;
        else
                cleanup = true;
        spin_unlock(&mm->page_table_lock);

        if (cleanup)
                goto error;

        /*
         * We should only get here if we hold the mmap_sem in write mode, ie on
         * registration of the first mirror through hmm_mirror_register().
         */
        hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
        if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
                goto error_mm;

        return hmm;

error_mm:
        spin_lock(&mm->page_table_lock);
        if (mm->hmm == hmm)
                mm->hmm = NULL;
        spin_unlock(&mm->page_table_lock);
error:
        kfree(hmm);
        return NULL;
}
static void hmm_free(struct kref *kref)
{
        struct hmm *hmm = container_of(kref, struct hmm, kref);
        struct mm_struct *mm = hmm->mm;

        mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

        spin_lock(&mm->page_table_lock);
        if (mm->hmm == hmm)
                mm->hmm = NULL;
        spin_unlock(&mm->page_table_lock);

        kfree(hmm);
}
static inline void hmm_put(struct hmm *hmm)
{
        kref_put(&hmm->kref, hmm_free);
}
void hmm_mm_destroy(struct mm_struct *mm)
{
        struct hmm *hmm;

        spin_lock(&mm->page_table_lock);
        hmm = mm_get_hmm(mm);
        mm->hmm = NULL;
        if (hmm) {
                hmm->mm = NULL;
                hmm->dead = true;
                spin_unlock(&mm->page_table_lock);
                hmm_put(hmm);
                return;
        }

        spin_unlock(&mm->page_table_lock);
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct hmm *hmm = mm_get_hmm(mm);
        struct hmm_mirror *mirror;
        struct hmm_range *range;

        /* Report this HMM as dying. */
        hmm->dead = true;

        /* Wake-up everyone waiting on any range. */
        mutex_lock(&hmm->lock);
        list_for_each_entry(range, &hmm->ranges, list) {
                range->valid = false;
        }
        wake_up_all(&hmm->wq);
        mutex_unlock(&hmm->lock);

        down_write(&hmm->mirrors_sem);
        mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
                                          list);
        while (mirror) {
                list_del_init(&mirror->list);
                if (mirror->ops->release) {
                        /*
                         * Drop mirrors_sem so the callback can wait on any
                         * pending work that might itself trigger a
                         * mmu_notifier callback and thus would deadlock with
                         * us.
                         */
                        up_write(&hmm->mirrors_sem);
                        mirror->ops->release(mirror);
                        down_write(&hmm->mirrors_sem);
                }
                mirror = list_first_entry_or_null(&hmm->mirrors,
                                                  struct hmm_mirror, list);
        }
        up_write(&hmm->mirrors_sem);

        hmm_put(hmm);
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
                        const struct mmu_notifier_range *nrange)
{
        struct hmm *hmm = mm_get_hmm(nrange->mm);
        struct hmm_mirror *mirror;
        struct hmm_update update;
        struct hmm_range *range;
        int ret = 0;

        update.start = nrange->start;
        update.end = nrange->end;
        update.event = HMM_UPDATE_INVALIDATE;
        update.blockable = nrange->blockable;

        if (nrange->blockable)
                mutex_lock(&hmm->lock);
        else if (!mutex_trylock(&hmm->lock)) {
                ret = -EAGAIN;
                goto out;
        }
        hmm->notifiers++;
        list_for_each_entry(range, &hmm->ranges, list) {
                if (update.end < range->start || update.start >= range->end)
                        continue;

                range->valid = false;
        }
        mutex_unlock(&hmm->lock);

        if (nrange->blockable)
                down_read(&hmm->mirrors_sem);
        else if (!down_read_trylock(&hmm->mirrors_sem)) {
                ret = -EAGAIN;
                goto out;
        }
        list_for_each_entry(mirror, &hmm->mirrors, list) {
                ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
                if (!update.blockable && ret == -EAGAIN) {
                        up_read(&hmm->mirrors_sem);
                        goto out;
                }
                ret = 0;
        }
        up_read(&hmm->mirrors_sem);

out:
        hmm_put(hmm);
        return ret;
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
                        const struct mmu_notifier_range *nrange)
{
        struct hmm *hmm = mm_get_hmm(nrange->mm);

        mutex_lock(&hmm->lock);
        hmm->notifiers--;
        if (!hmm->notifiers) {
                struct hmm_range *range;

                list_for_each_entry(range, &hmm->ranges, list) {
                        if (range->valid)
                                continue;
                        range->valid = true;
                }
                wake_up_all(&hmm->wq);
        }
        mutex_unlock(&hmm->lock);

        hmm_put(hmm);
}
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
        .release                = hmm_release,
        .invalidate_range_start = hmm_invalidate_range_start,
        .invalidate_range_end   = hmm_invalidate_range_end,
};
/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
        /* Sanity check */
        if (!mm || !mirror || !mirror->ops)
                return -EINVAL;

        mirror->hmm = hmm_get_or_create(mm);
        if (!mirror->hmm)
                return -ENOMEM;

        down_write(&mirror->hmm->mirrors_sem);
        list_add(&mirror->list, &mirror->hmm->mirrors);
        up_write(&mirror->hmm->mirrors_sem);

        return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
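
/*
 * Example usage (a sketch, not part of the original file; every my_* name is
 * hypothetical). A driver embeds a struct hmm_mirror in its per-mm state,
 * provides a sync_cpu_device_pagetables() callback that invalidates its own
 * page tables, and registers the mirror while holding mmap_sem in write mode:
 *
 *      static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                      const struct hmm_update *update)
 *      {
 *              // invalidate device mappings for [update->start, update->end)
 *              return 0;
 *      }
 *
 *      static const struct hmm_mirror_ops my_mirror_ops = {
 *              .sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *      };
 *
 *      down_write(&mm->mmap_sem);
 *      my_mm->mirror.ops = &my_mirror_ops;
 *      ret = hmm_mirror_register(&my_mm->mirror, mm);
 *      up_write(&mm->mmap_sem);
 */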
/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
        struct hmm *hmm = READ_ONCE(mirror->hmm);

        if (hmm == NULL)
                return;

        down_write(&hmm->mirrors_sem);
        list_del_init(&mirror->list);
        /* To protect us against double unregister ... */
        mirror->hmm = NULL;
        up_write(&hmm->mirrors_sem);

        hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
struct hmm_vma_walk {
        struct hmm_range        *range;
        unsigned long           last;
        bool                    fault;
        bool                    block;
};
static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
                            bool write_fault, uint64_t *pfn)
{
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        vm_fault_t ret;

        flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
        flags |= write_fault ? FAULT_FLAG_WRITE : 0;
        ret = handle_mm_fault(vma, addr, flags);
        if (ret & VM_FAULT_RETRY)
                return -EAGAIN;
        if (ret & VM_FAULT_ERROR) {
                *pfn = range->values[HMM_PFN_ERROR];
                return -EFAULT;
        }

        return -EBUSY;
}
static int hmm_pfns_bad(unsigned long addr,
                        unsigned long end,
                        struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i;

        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++)
                pfns[i] = range->values[HMM_PFN_ERROR];

        return 0;
}
/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
                              bool fault, bool write_fault,
                              struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i;

        hmm_vma_walk->last = addr;
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++) {
                pfns[i] = range->values[HMM_PFN_NONE];
                if (fault || write_fault) {
                        int ret;

                        ret = hmm_vma_do_fault(walk, addr, write_fault,
                                               &pfns[i]);
                        if (ret != -EBUSY)
                                return ret;
                }
        }

        return (fault || write_fault) ? -EBUSY : 0;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                      uint64_t pfns, uint64_t cpu_flags,
                                      bool *fault, bool *write_fault)
{
        struct hmm_range *range = hmm_vma_walk->range;

        if (!hmm_vma_walk->fault)
                return;

        /*
         * We not only consider the individual per-page request, we also
         * consider the default flags requested for the range. The API can
         * be used in two fashions. In the first one the HMM user coalesces
         * multiple page faults into one request and sets flags per pfn for
         * those faults. In the second one the HMM user wants to pre-fault a
         * range with specific flags. For the latter it is a waste to have
         * the user pre-fill the pfn array with a default flags value.
         */
        pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

        /* We aren't asked to do anything ... */
        if (!(pfns & range->flags[HMM_PFN_VALID]))
                return;
        /* If this is device memory then only fault if explicitly requested */
        if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
                /* Do we fault on device memory ? */
                if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
                        *write_fault = pfns & range->flags[HMM_PFN_WRITE];
                        *fault = true;
                }
                return;
        }

        /* If CPU page table is not valid then we need to fault */
        *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
        /* Need to write fault ? */
        if ((pfns & range->flags[HMM_PFN_WRITE]) &&
            !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
                *write_fault = true;
                *fault = true;
        }
}
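
/*
 * Example (a sketch, not from the original file): a caller that wants to
 * pre-fault a whole range with write permission can rely on default_flags
 * and pfn_flags_mask instead of pre-filling the pfn array:
 *
 *      range.default_flags = range.flags[HMM_PFN_VALID] |
 *                            range.flags[HMM_PFN_WRITE];
 *      range.pfn_flags_mask = 0;       // ignore per-pfn requests
 *
 * With pfn_flags_mask cleared every entry inherits default_flags, so the
 * check above requests a write fault for every page of the range.
 */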
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                 const uint64_t *pfns, unsigned long npages,
                                 uint64_t cpu_flags, bool *fault,
                                 bool *write_fault)
{
        unsigned long i;

        if (!hmm_vma_walk->fault) {
                *fault = *write_fault = false;
                return;
        }

        *fault = *write_fault = false;
        for (i = 0; i < npages; ++i) {
                hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
                                   fault, write_fault);
                if ((*fault) || (*write_fault))
                        return;
        }
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        bool fault, write_fault;
        unsigned long i, npages;
        uint64_t *pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        pfns = &range->pfns[i];
        hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                             0, &fault, &write_fault);
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pmd(struct mm_walk *walk,
                              unsigned long addr,
                              unsigned long end,
                              uint64_t *pfns,
                              pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        bool fault, write_fault;
        uint64_t cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
                             &fault, &write_fault);

        if (pmd_protnone(pmd) || fault || write_fault)
                return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

        pfn = pmd_pfn(pmd) + pte_index(addr);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
        hmm_vma_walk->last = end;
        return 0;
}
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte))
                return 0;
        return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              uint64_t *pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        bool fault, write_fault;
        uint64_t cpu_flags;
        pte_t pte = *ptep;
        uint64_t orig_pfn = *pfn;

        *pfn = range->values[HMM_PFN_NONE];
        fault = write_fault = false;

        if (pte_none(pte)) {
                hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
                                   &fault, &write_fault);
                if (fault || write_fault)
                        goto fault;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                if (!non_swap_entry(entry)) {
                        if (fault || write_fault)
                                goto fault;
                        return 0;
                }

                /*
                 * This is a special swap entry, ignore migration, use
                 * device and report anything else as error.
                 */
                if (is_device_private_entry(entry)) {
                        cpu_flags = range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_DEVICE_PRIVATE];
                        cpu_flags |= is_write_device_private_entry(entry) ?
                                range->flags[HMM_PFN_WRITE] : 0;
                        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                                           &fault, &write_fault);
                        if (fault || write_fault)
                                goto fault;
                        *pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
                        *pfn |= cpu_flags;
                        return 0;
                }

                if (is_migration_entry(entry)) {
                        if (fault || write_fault) {
                                pte_unmap(ptep);
                                hmm_vma_walk->last = addr;
                                migration_entry_wait(vma->vm_mm,
                                                     pmdp, addr);
                                return -EBUSY;
                        }
                        return 0;
                }

                /* Report error for everything else */
                *pfn = range->values[HMM_PFN_ERROR];
                return -EFAULT;
        }

        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                           &fault, &write_fault);

        if (fault || write_fault)
                goto fault;

        *pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
        return 0;

fault:
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        uint64_t *pfns = range->pfns;
        unsigned long addr = start, i;
        pte_t *ptep;
        pmd_t pmd;

again:
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, walk);

        if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
                return hmm_pfns_bad(start, end, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                bool fault, write_fault;
                unsigned long npages;
                uint64_t *pfns;

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                pfns = &range->pfns[i];

                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     0, &fault, &write_fault);
                if (fault || write_fault) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(vma->vm_mm, pmdp);
                        return -EBUSY;
                }
                return 0;
        } else if (!pmd_present(pmd))
                return hmm_pfns_bad(start, end, walk);

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take pmd_lock here, even if some other thread
                 * is splitting the huge pmd we will get that event through
                 * the mmu_notifier callback.
                 *
                 * So just read the pmd value and check again that it is a
                 * transparent huge or device mapping one and compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;

                i = (addr - range->start) >> PAGE_SHIFT;
                return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
        }

        /*
         * We have handled all the valid cases above, ie either none,
         * migration, huge or transparent huge. At this point either it is a
         * valid pmd entry pointing to a pte directory or it is a bad pmd that
         * will not recover.
         */
        if (pmd_bad(pmd))
                return hmm_pfns_bad(start, end, walk);

        ptep = pte_offset_map(pmdp, addr);
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
                if (r) {
                        /* hmm_vma_handle_pte() did unmap pte directory */
                        hmm_vma_walk->last = addr;
                        return r;
                }
        }
        pte_unmap(ptep - 1);

        hmm_vma_walk->last = addr;
        return 0;
}
static void hmm_pfns_clear(struct hmm_range *range,
                           uint64_t *pfns,
                           unsigned long addr,
                           unsigned long end)
{
        for (; addr < end; addr += PAGE_SIZE, pfns++)
                *pfns = range->values[HMM_PFN_NONE];
}
static void hmm_pfns_special(struct hmm_range *range)
{
        unsigned long addr = range->start, i = 0;

        for (; addr < range->end; addr += PAGE_SIZE, i++)
                range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}
/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 *
 * @mm: the mm struct for the range of virtual address
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
                       struct mm_struct *mm,
                       unsigned long start,
                       unsigned long end)
{
        range->start = start & PAGE_MASK;
        range->end = end & PAGE_MASK;
        range->valid = false;
        range->hmm = NULL;

        if (range->start >= range->end)
                return -EINVAL;

        range->start = start;
        range->end = end;

        range->hmm = hmm_get_or_create(mm);
        if (!range->hmm)
                return -EFAULT;

        /* Check if hmm_mm_destroy() was called. */
        if (range->hmm->mm == NULL || range->hmm->dead) {
                hmm_put(range->hmm);
                range->hmm = NULL;
                return -EFAULT;
        }

        /* Initialize range to track CPU page table update */
        mutex_lock(&range->hmm->lock);

        list_add_rcu(&range->list, &range->hmm->ranges);

        /*
         * If there are any concurrent notifiers we have to wait for them for
         * the range to be valid (see hmm_range_wait_until_valid()).
         */
        if (!range->hmm->notifiers)
                range->valid = true;
        mutex_unlock(&range->hmm->lock);

        return 0;
}
EXPORT_SYMBOL(hmm_range_register);
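
/*
 * Example usage (a sketch, not part of the original file; my_* names are
 * hypothetical). The flags/values arrays describe the driver's pfn encoding
 * (see include/linux/hmm.h) and must be set up before registration:
 *
 *      struct hmm_range range;
 *
 *      range.flags = my_hmm_flags;
 *      range.values = my_hmm_values;
 *      range.pfns = my_pfns;           // (end - start) >> PAGE_SHIFT entries
 *      range.default_flags = 0;
 *      range.pfn_flags_mask = -1UL;    // honor per-pfn requests
 *
 *      ret = hmm_range_register(&range, mm, start, end);
 */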
/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
        /* Sanity check this really should not happen. */
        if (range->hmm == NULL || range->end <= range->start)
                return;

        mutex_lock(&range->hmm->lock);
        list_del_rcu(&range->list);
        mutex_unlock(&range->hmm->lock);

        /* Drop reference taken by hmm_range_register() */
        range->valid = false;
        hmm_put(range->hmm);
        range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);
/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 *
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See include/linux/hmm.h for an example
 * of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
        unsigned long start = range->start, end;
        struct hmm_vma_walk hmm_vma_walk;
        struct hmm *hmm = range->hmm;
        struct vm_area_struct *vma;
        struct mm_walk mm_walk;

        /* Check if hmm_mm_destroy() was called. */
        if (hmm->mm == NULL || hmm->dead)
                return -EFAULT;

        do {
                /* If range is no longer valid force retry. */
                if (!range->valid)
                        return -EAGAIN;

                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & VM_SPECIAL))
                        return -EFAULT;

                /* FIXME support hugetlb fs/dax */
                if (is_vm_hugetlb_page(vma) || vma_is_dax(vma)) {
                        hmm_pfns_special(range);
                        return -EINVAL;
                }

                if (!(vma->vm_flags & VM_READ)) {
                        /*
                         * If the vma does not allow read access, then assume
                         * that it does not allow write access either. HMM
                         * does not support architectures that allow write
                         * without read.
                         */
                        hmm_pfns_clear(range, range->pfns,
                                       range->start, range->end);
                        return -EPERM;
                }

                range->vma = vma;
                hmm_vma_walk.last = start;
                hmm_vma_walk.fault = false;
                hmm_vma_walk.range = range;
                mm_walk.private = &hmm_vma_walk;
                end = min(range->end, vma->vm_end);

                mm_walk.vma = vma;
                mm_walk.mm = vma->vm_mm;
                mm_walk.pte_entry = NULL;
                mm_walk.test_walk = NULL;
                mm_walk.hugetlb_entry = NULL;
                mm_walk.pmd_entry = hmm_vma_walk_pmd;
                mm_walk.pte_hole = hmm_vma_walk_hole;

                walk_page_range(start, end, &mm_walk);
                start = end;
        } while (start < range->end);

        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
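
/*
 * Example usage (a sketch, not part of the original file): a snapshot has to
 * be retried whenever a concurrent invalidation marks the range invalid:
 *
 *      hmm_range_register(&range, mm, start, end);
 *      do {
 *              if (!hmm_range_wait_until_valid(&range, timeout))
 *                      break;          // give up, eg the mm is dying
 *              down_read(&mm->mmap_sem);
 *              ret = hmm_range_snapshot(&range);
 *              up_read(&mm->mmap_sem);
 *      } while (ret == -EAGAIN);
 *      hmm_range_unregister(&range);
 */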
/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL  invalid arguments or mm or virtual address are in an
 *                    invalid vma (ie either hugetlbfs or device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
        unsigned long start = range->start, end;
        struct hmm_vma_walk hmm_vma_walk;
        struct hmm *hmm = range->hmm;
        struct vm_area_struct *vma;
        struct mm_walk mm_walk;
        int ret;

        /* Check if hmm_mm_destroy() was called. */
        if (hmm->mm == NULL || hmm->dead)
                return -EFAULT;

        do {
                /* If range is no longer valid force retry. */
                if (!range->valid) {
                        up_read(&hmm->mm->mmap_sem);
                        return -EAGAIN;
                }

                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & VM_SPECIAL))
                        return -EFAULT;

                /* FIXME support hugetlb fs/dax */
                if (is_vm_hugetlb_page(vma) || vma_is_dax(vma)) {
                        hmm_pfns_special(range);
                        return -EINVAL;
                }

                if (!(vma->vm_flags & VM_READ)) {
                        /*
                         * If the vma does not allow read access, then assume
                         * that it does not allow write access either. HMM
                         * does not support architectures that allow write
                         * without read.
                         */
                        hmm_pfns_clear(range, range->pfns,
                                       range->start, range->end);
                        return -EPERM;
                }

                range->vma = vma;
                hmm_vma_walk.last = start;
                hmm_vma_walk.fault = true;
                hmm_vma_walk.block = block;
                hmm_vma_walk.range = range;
                mm_walk.private = &hmm_vma_walk;
                end = min(range->end, vma->vm_end);

                mm_walk.vma = vma;
                mm_walk.mm = vma->vm_mm;
                mm_walk.pte_entry = NULL;
                mm_walk.test_walk = NULL;
                mm_walk.hugetlb_entry = NULL;
                mm_walk.pmd_entry = hmm_vma_walk_pmd;
                mm_walk.pte_hole = hmm_vma_walk_hole;

                do {
                        ret = walk_page_range(start, end, &mm_walk);
                        start = hmm_vma_walk.last;

                        /* Keep trying while the range is valid. */
                } while (ret == -EBUSY && range->valid);

                if (ret) {
                        unsigned long i;

                        i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
                        hmm_pfns_clear(range, &range->pfns[i],
                                       hmm_vma_walk.last, range->end);
                        return ret;
                }
                start = end;

        } while (start < range->end);

        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
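
/*
 * Example usage (a sketch, not part of the original file): faulting follows
 * the same retry pattern as hmm_range_snapshot(), with mmap_sem held for
 * reading; with block == true the mmap_sem is still held on return:
 *
 *      down_read(&mm->mmap_sem);
 *      ret = hmm_range_fault(&range, true);
 *      up_read(&mm->mmap_sem);
 *      if (ret == -EBUSY)
 *              goto retry;     // range was invalidated, wait and retry
 */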
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        struct page *page;

        page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
        if (!page)
                return NULL;
        lock_page(page);
        return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        complete(&devmem->completion);
}
static void hmm_devmem_ref_exit(void *data)
{
        struct percpu_ref *ref = data;
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        wait_for_completion(&devmem->completion);
        percpu_ref_exit(ref);
}
static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
        percpu_ref_kill(ref);
}
static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   const struct page *page,
                                   unsigned int flags,
                                   pmd_t *pmdp)
{
        struct hmm_devmem *devmem = page->pgmap->data;

        return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}
static void hmm_devmem_free(struct page *page, void *data)
{
        struct hmm_devmem *devmem = data;

        page->mapping = NULL;

        devmem->ops->free(devmem, page);
}
/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization
 * and is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
                                  struct device *device,
                                  unsigned long size)
{
        struct hmm_devmem *devmem;
        resource_size_t addr;
        void *result;
        int ret;

        dev_pagemap_get_ops();

        devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
        if (!devmem)
                return ERR_PTR(-ENOMEM);

        init_completion(&devmem->completion);
        devmem->pfn_first = -1UL;
        devmem->pfn_last = -1UL;
        devmem->resource = NULL;
        devmem->device = device;
        devmem->ops = ops;

        ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
                              0, GFP_KERNEL);
        if (ret)
                return ERR_PTR(ret);

        ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
        if (ret)
                return ERR_PTR(ret);

        size = ALIGN(size, PA_SECTION_SIZE);
        addr = min((unsigned long)iomem_resource.end,
                   (1UL << MAX_PHYSMEM_BITS) - 1);
        addr = addr - size + 1UL;

        /*
         * FIXME add a new helper to quickly walk resource tree and find free
         * range
         *
         * FIXME what about ioport_resource resource ?
         */
        for (; addr > size && addr >= iomem_resource.start; addr -= size) {
                ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
                if (ret != REGION_DISJOINT)
                        continue;

                devmem->resource = devm_request_mem_region(device, addr, size,
                                                           dev_name(device));
                if (!devmem->resource)
                        return ERR_PTR(-ENOMEM);
                break;
        }
        if (!devmem->resource)
                return ERR_PTR(-ERANGE);

        devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
        devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
        devmem->pfn_last = devmem->pfn_first +
                           (resource_size(devmem->resource) >> PAGE_SHIFT);
        devmem->page_fault = hmm_devmem_fault;

        devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
        devmem->pagemap.res = *devmem->resource;
        devmem->pagemap.page_free = hmm_devmem_free;
        devmem->pagemap.altmap_valid = false;
        devmem->pagemap.ref = &devmem->ref;
        devmem->pagemap.data = devmem;
        devmem->pagemap.kill = hmm_devmem_ref_kill;

        result = devm_memremap_pages(devmem->device, &devmem->pagemap);
        if (IS_ERR(result))
                return result;
        return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
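
/*
 * Example usage (a sketch, not part of the original file; my_* names are
 * hypothetical). Device memory is typically hotplugged at probe time and then
 * managed by the driver through the provided callbacks:
 *
 *      static const struct hmm_devmem_ops my_devmem_ops = {
 *              .free  = my_devmem_free,
 *              .fault = my_devmem_fault,
 *      };
 *
 *      devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, MY_DEVMEM_SIZE);
 *      if (IS_ERR(devmem))
 *              return PTR_ERR(devmem);
 *      // device pages span devmem->pfn_first .. devmem->pfn_last - 1
 */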
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
                                           struct device *device,
                                           struct resource *res)
{
        struct hmm_devmem *devmem;
        void *result;
        int ret;

        if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
                return ERR_PTR(-EINVAL);

        dev_pagemap_get_ops();

        devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
        if (!devmem)
                return ERR_PTR(-ENOMEM);

        init_completion(&devmem->completion);
        devmem->pfn_first = -1UL;
        devmem->pfn_last = -1UL;
        devmem->resource = res;
        devmem->device = device;
        devmem->ops = ops;

        ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
                              0, GFP_KERNEL);
        if (ret)
                return ERR_PTR(ret);

        ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
                                       &devmem->ref);
        if (ret)
                return ERR_PTR(ret);

        devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
        devmem->pfn_last = devmem->pfn_first +
                           (resource_size(devmem->resource) >> PAGE_SHIFT);
        devmem->page_fault = hmm_devmem_fault;

        devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
        devmem->pagemap.res = *devmem->resource;
        devmem->pagemap.page_free = hmm_devmem_free;
        devmem->pagemap.altmap_valid = false;
        devmem->pagemap.ref = &devmem->ref;
        devmem->pagemap.data = devmem;
        devmem->pagemap.kill = hmm_devmem_ref_kill;

        result = devm_memremap_pages(devmem->device, &devmem->pagemap);
        if (IS_ERR(result))
                return result;
        return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;
static void hmm_device_release(struct device *device)
{
        struct hmm_device *hmm_device;

        hmm_device = container_of(device, struct hmm_device, device);
        spin_lock(&hmm_device_lock);
        clear_bit(hmm_device->minor, hmm_device_mask);
        spin_unlock(&hmm_device_lock);

        kfree(hmm_device);
}
struct hmm_device *hmm_device_new(void *drvdata)
{
        struct hmm_device *hmm_device;

        hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
        if (!hmm_device)
                return ERR_PTR(-ENOMEM);

        spin_lock(&hmm_device_lock);
        hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
        if (hmm_device->minor >= HMM_DEVICE_MAX) {
                spin_unlock(&hmm_device_lock);
                kfree(hmm_device);
                return ERR_PTR(-EBUSY);
        }
        set_bit(hmm_device->minor, hmm_device_mask);
        spin_unlock(&hmm_device_lock);

        dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
        hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
                                        hmm_device->minor);
        hmm_device->device.release = hmm_device_release;
        dev_set_drvdata(&hmm_device->device, drvdata);
        hmm_device->device.class = hmm_device_class;
        device_initialize(&hmm_device->device);

        return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);
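
/*
 * Example usage (a sketch, not part of the original file; my_drvdata is
 * hypothetical):
 *
 *      hmm_device = hmm_device_new(my_drvdata);
 *      if (IS_ERR(hmm_device))
 *              return PTR_ERR(hmm_device);
 *      ...
 *      hmm_device_put(hmm_device);
 */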
void hmm_device_put(struct hmm_device *hmm_device)
{
        put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
static int __init hmm_init(void)
{
        int ret;

        ret = alloc_chrdev_region(&hmm_device_devt, 0,
                                  HMM_DEVICE_MAX,
                                  "hmm_device");
        if (ret)
                return ret;

        hmm_device_class = class_create(THIS_MODULE, "hmm_device");
        if (IS_ERR(hmm_device_class)) {
                unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
                return PTR_ERR(hmm_device_class);
        }
        return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */