/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);

	if (hmm && kref_get_unless_zero(&hmm->kref))
		return hmm;

	return NULL;
}
/*
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: returns an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	bool cleanup = false;

	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->dead = false;
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode, i.e.,
	 * on registration of the first mirror through hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}
static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);
	struct mm_struct *mm = hmm->mm;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}
static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}
void hmm_mm_destroy(struct mm_struct *mm)
{
	struct hmm *hmm;

	spin_lock(&mm->page_table_lock);
	hmm = mm_get_hmm(mm);
	mm->hmm = NULL;
	if (hmm) {
		hmm->mm = NULL;
		hmm->dead = true;
		spin_unlock(&mm->page_table_lock);
		hmm_put(hmm);
		return;
	}

	spin_unlock(&mm->page_table_lock);
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	/* Report this HMM as dying. */
	hmm->dead = true;

	/* Wake-up everyone waiting on any range. */
	mutex_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		range->valid = false;
	}
	wake_up_all(&hmm->wq);
	mutex_unlock(&hmm->lock);

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the callback can wait on any
			 * pending work that might itself trigger a
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	VM_BUG_ON(!hmm);

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = mmu_notifier_range_blockable(nrange);

	if (mmu_notifier_range_blockable(nrange))
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && rc == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			ret = -EAGAIN;
			goto out;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);

	VM_BUG_ON(!hmm);

	mutex_lock(&hmm->lock);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	mutex_unlock(&hmm->lock);

	hmm_put(hmm);
}
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};
/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
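
/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically embed a struct hmm_mirror in its own per-process state and
 * register it with the mmap_sem held in write mode, roughly:
 *
 *	static const struct hmm_mirror_ops driver_mirror_ops = {
 *		.sync_cpu_device_pagetables = driver_sync_cpu_device_pagetables,
 *		.release = driver_release,
 *	};
 *
 *	down_write(&current->mm->mmap_sem);
 *	mirror->ops = &driver_mirror_ops;
 *	ret = hmm_mirror_register(mirror, current->mm);
 *	up_write(&current->mm->mmap_sem);
 *
 * driver_sync_cpu_device_pagetables() and driver_release() are hypothetical
 * driver callbacks named only for this example.
 */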
/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = READ_ONCE(mirror->hmm);

	if (hmm == NULL)
		return;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	/* To protect us against double unregister ... */
	mirror->hmm = NULL;
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};
static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}
static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}
/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the range. The API can be used in two
	 * fashions. In the first one, the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for each of those
	 * faults. In the second one, the HMM user wants to pre-fault a range
	 * with specific flags. For the latter it would be a waste to have the
	 * user pre-fill the pfn array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
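
/*
 * Illustrative sketch (not part of the original file): the second API
 * fashion described above, pre-faulting a whole range with the same flags,
 * would set the range defaults instead of filling the pfns array:
 *
 *	range.default_flags = range.flags[HMM_PFN_VALID] |
 *			      range.flags[HMM_PFN_WRITE];
 *	range.pfn_flags_mask = 0;
 *
 * With pfn_flags_mask set to 0, every pfn entry inherits default_flags in
 * the computation above, so the caller does not need to pre-fill pfns[].
 */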
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code ! */
	return -EINVAL;
#endif
}
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read pmd value and check again it's a transparent
		 * huge or device mapping one and compute corresponding pfn
		 * values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e., either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage get_dev_pagemap() optimization which
		 * will not re-take a reference on a pgmap if we already have
		 * one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

#ifdef CONFIG_HUGETLB_PAGE
		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
#else
		return -EINVAL;
#endif
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}
static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}
/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mm: the mm struct for the range of virtual address
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	range->hmm = hmm_get_or_create(mm);
	if (!range->hmm)
		return -EFAULT;

	/* Check if hmm_mm_destroy() was called. */
	if (range->hmm->mm == NULL || range->hmm->dead) {
		hmm_put(range->hmm);
		range->hmm = NULL;
		return -EFAULT;
	}

	/* Initialize range to track CPU page table update */
	mutex_lock(&range->hmm->lock);

	list_add_rcu(&range->list, &range->hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!range->hmm->notifiers)
		range->valid = true;
	mutex_unlock(&range->hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
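
/*
 * Illustrative sketch (not part of the original file): the expected
 * lifetime of a range, per include/linux/hmm.h, is roughly:
 *
 *	ret = hmm_range_register(&range, mm, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	if (!hmm_range_wait_until_valid(&range, timeout_ms)) {
 *		hmm_range_unregister(&range);
 *		return -EBUSY;
 *	}
 *	... snapshot or fault the range ...
 *	hmm_range_unregister(&range);
 *
 * hmm_range_wait_until_valid() is declared in include/linux/hmm.h;
 * timeout_ms is a caller-chosen value used only for this example.
 */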
/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	/* Sanity check this really should not happen. */
	if (range->hmm == NULL || range->end <= range->start)
		return;

	mutex_lock(&range->hmm->lock);
	list_del_rcu(&range->list);
	mutex_unlock(&range->hmm->lock);

	/* Drop reference taken by hmm_range_register() */
	range->valid = false;
	hmm_put(range->hmm);
	range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);
/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (i.e., either no
 *          valid vma or it is illegal to access that range), number of valid
 *          pages in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See include/linux/hmm.h for an example
 * of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			struct hstate *h = hstate_vma(vma);

			if (huge_page_shift(h) != range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If vma does not allow read access, then assume that
			 * it does not allow write access, either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
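
/*
 * Illustrative sketch (not part of the original file): a typical snapshot
 * loop retries while concurrent invalidation races the reader:
 *
 *	again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	up_read(&mm->mmap_sem);
 *	if (ret == -EAGAIN)
 *		goto again;
 *
 * The device driver must still check range.valid, under its own lock
 * against sync_cpu_device_pagetables(), before consuming range.pfns[].
 */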
/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
 * Returns: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL  invalid arguments or mm or virtual address are in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (i.e., either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If vma does not allow read access, then assume that
			 * it does not allow write access, either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
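
/*
 * Illustrative sketch (not part of the original file): faulting is usually
 * driven until the whole range is serviced or a hard error is hit:
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	if (ret < 0) {
 *		up_read(&mm->mmap_sem);
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *	... program the device page table from range.pfns[] ...
 *	up_read(&mm->mmap_sem);
 *
 * With block == true the call sleeps on invalidation instead of returning
 * -EAGAIN, so mmap_sem is still held on return.
 */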
/*
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device against which to dma map pages
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
 * Returns: number of pages mapped on success, -EAGAIN if mmap_sem has been
 *          dropped and you need to try again, some other error value otherwise
 *
 * Note same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, block);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove lot of dumb code duplicated across many archs.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
/*
 * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
 * @range: range being unmapped
 * @vma: the vma against which the range applies (optional)
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
 * Returns: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that caller MUST abide by the mmu notifier or use an HMM mirror and
 * abide by the sync_cpu_device_pagetables() callback so that it is safe here
 * to call set_page_dirty(). Caller must also take appropriate locks to avoid
 * concurrent mmu notifier or sync_cpu_device_pagetables() making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in function description on why it is
			 * safe here to call set_page_dirty()
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_vma_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);
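
/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above pair up around the device's use of the pages:
 *
 *	ret = hmm_range_dma_map(&range, dev, daddrs, true);
 *	if (ret <= 0)
 *		goto err;
 *	... device reads/writes through daddrs[] ...
 *	hmm_range_dma_unmap(&range, NULL, dev, daddrs, true);
 *
 * Passing dirty == true on unmap calls set_page_dirty() on pages that were
 * mapped with the write flag set, subject to the locking rules in the
 * comment above.
 */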
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}
static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}
/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * Device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
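
/*
 * Illustrative sketch (not part of the original file): a driver brings up
 * its private memory once at probe time, for example:
 *
 *	static const struct hmm_devmem_ops driver_devmem_ops = {
 *		.free  = driver_devmem_free,
 *		.fault = driver_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&driver_devmem_ops, &pdev->dev, SZ_1G);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *
 * driver_devmem_free() and driver_devmem_fault() are hypothetical callbacks
 * named only for this example; teardown is handled through the devm_*
 * actions registered above.
 */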
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;
static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}
struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);
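
/*
 * Illustrative sketch (not part of the original file): drivers that want
 * the fake device pair hmm_device_new() with hmm_device_put():
 *
 *	hmm_device = hmm_device_new(driver_private_data);
 *	if (IS_ERR(hmm_device))
 *		return PTR_ERR(hmm_device);
 *	...
 *	hmm_device_put(hmm_device);
 *
 * driver_private_data is whatever drvdata pointer the caller wants
 * attached via dev_set_drvdata(); the name is used only for this example.
 */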
void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */