// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);

	if (hmm && kref_get_unless_zero(&hmm->kref))
		return hmm;

	return NULL;
}
/*
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then a reference is taken on it and it is returned.
 * Otherwise a new HMM struct is allocated, initialized, associated with the
 * mm and returned.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	bool cleanup = false;

	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->dead = false;
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode, i.e.
	 * on registration of the first mirror through hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}
static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);
	struct mm_struct *mm = hmm->mm;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}
static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}
void hmm_mm_destroy(struct mm_struct *mm)
{
	struct hmm *hmm;

	spin_lock(&mm->page_table_lock);
	hmm = mm_get_hmm(mm);
	mm->hmm = NULL;
	if (hmm) {
		hmm->mm = NULL;
		hmm->dead = true;
		spin_unlock(&mm->page_table_lock);
		hmm_put(hmm);
		return;
	}

	spin_unlock(&mm->page_table_lock);
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	/* Report this HMM as dying. */
	hmm->dead = true;

	/* Wake-up everyone waiting on any range. */
	mutex_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		range->valid = false;
	}
	wake_up_all(&hmm->wq);
	mutex_unlock(&hmm->lock);

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the callback can wait on any
			 * pending work that might itself trigger a
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = mmu_notifier_range_blockable(nrange);

	if (mmu_notifier_range_blockable(nrange))
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			goto out;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);

	mutex_lock(&hmm->lock);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	mutex_unlock(&hmm->lock);

	hmm_put(hmm);
}
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};
/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE!
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
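
/*
 * Illustrative sketch (not from the original file): a driver typically embeds
 * a struct hmm_mirror in its per-process state and registers it with the
 * mmap_sem held in write mode. All driver-side names below (my_mirror_ops,
 * my_sync_cpu_device_pagetables, my_release, struct my_process) are
 * hypothetical.
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *		.release = my_release,
 *	};
 *
 *	static int my_process_bind(struct my_process *p, struct mm_struct *mm)
 *	{
 *		int ret;
 *
 *		p->mirror.ops = &my_mirror_ops;
 *		down_write(&mm->mmap_sem);
 *		ret = hmm_mirror_register(&p->mirror, mm);
 *		up_write(&mm->mmap_sem);
 *		return ret;
 *	}
 */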
/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = READ_ONCE(mirror->hmm);

	if (hmm == NULL)
		return;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	/* To protect us against double unregister ... */
	mirror->hmm = NULL;
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};
static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}
static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}
/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns
 * true, or whenever there is no page directory covering the virtual address
 * range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * Consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used in
	 * two fashions: one where the HMM user coalesces multiple page faults
	 * into one request and sets flags per pfn for those faults, and one
	 * where the HMM user wants to pre-fault a range with specific flags.
	 * For the latter it would be a waste to have the user pre-fill the
	 * pfn array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code ! */
	return -EINVAL;
#endif
}
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
							 swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd lock here: even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap the pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

#ifdef CONFIG_HUGETLB_PAGE
		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
#else
		return -EINVAL;
#endif
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}
static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}
/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @mm: the mm struct for the range of virtual address
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	range->hmm = hmm_get_or_create(mm);
	if (!range->hmm)
		return -EFAULT;

	/* Check if hmm_mm_destroy() was called. */
	if (range->hmm->mm == NULL || range->hmm->dead) {
		hmm_put(range->hmm);
		return -EFAULT;
	}

	/* Initialize range to track CPU page table update */
	mutex_lock(&range->hmm->lock);

	list_add_rcu(&range->list, &range->hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!range->hmm->notifiers)
		range->valid = true;
	mutex_unlock(&range->hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
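
/*
 * Illustrative sketch (not from the original file): typical driver-side setup
 * of a struct hmm_range before snapshotting or faulting it. The names
 * my_range_flags, my_range_values, my_pfns and my_pfn_shift are hypothetical;
 * the flags and values arrays describe the driver's own pfn encoding (see
 * include/linux/hmm.h).
 *
 *	struct hmm_range range = {
 *		.pfns		= my_pfns,
 *		.flags		= my_range_flags,
 *		.values		= my_range_values,
 *		.pfn_shift	= my_pfn_shift,
 *	};
 *	int ret;
 *
 *	ret = hmm_range_register(&range, mm, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	// wait for any concurrent invalidation to finish before using the
 *	// range, e.g. with hmm_range_wait_until_valid()
 */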
/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	/* Sanity check this really should not happen. */
	if (range->hmm == NULL || range->end <= range->start)
		return;

	mutex_lock(&range->hmm->lock);
	list_del_rcu(&range->list);
	mutex_unlock(&range->hmm->lock);

	/* Drop reference taken by hmm_range_register() */
	range->valid = false;
	hmm_put(range->hmm);
	range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);
/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), otherwise the number
 *          of valid pages in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses.
 * Snapshot validity is tracked by the range struct. See include/linux/hmm.h
 * for an example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			struct hstate *h = hstate_vma(vma);

			if (huge_page_shift(h) != range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
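
/*
 * Illustrative sketch (not from the original file): hmm_range_snapshot() is
 * called with mmap_sem held for reading and retried when a concurrent CPU
 * page table invalidation makes the range stale. Driver-specific steps are
 * elided; "again" and "driver_lock" are hypothetical.
 *
 *	again:
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_snapshot(&range);
 *		if (ret < 0) {
 *			up_read(&mm->mmap_sem);
 *			if (ret == -EAGAIN)
 *				goto again;
 *			return ret;
 *		}
 *		// take driver_lock serializing device page table updates and
 *		// recheck hmm_range_valid(&range) before consuming range.pfns[]
 */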
/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Returns: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments or mm or virtual address are in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
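
/*
 * Illustrative sketch (not from the original file): a fault path following
 * the usage pattern documented in include/linux/hmm.h. Error handling and the
 * device page table update are elided; "retry" is a hypothetical label in the
 * caller.
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	if (ret < 0) {
 *		if (ret != -EAGAIN)	// -EAGAIN already dropped mmap_sem
 *			up_read(&mm->mmap_sem);
 *		if (ret == -EBUSY || ret == -EAGAIN)
 *			goto retry;
 *		return ret;
 *	}
 *	// range.pfns[] now holds up-to-date entries for the faulted pages
 *	up_read(&mm->mmap_sem);
 */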
/*
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device against which to dma map pages
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Returns: number of pages mapped on success, -EAGAIN if mmap_sem has been
 *          dropped and you need to try again, some other error value otherwise
 *
 * Note same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, block);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove lot of dumb code duplicated across many archs.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
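
/*
 * Illustrative sketch (not from the original file): the dma helper follows
 * the same calling convention as hmm_range_fault(), with a daddrs array sized
 * like range.pfns[]. Variable names are hypothetical.
 *
 *	npages = (range.end - range.start) >> PAGE_SHIFT;
 *	daddrs = kmalloc_array(npages, sizeof(*daddrs), GFP_KERNEL);
 *	if (!daddrs)
 *		return -ENOMEM;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_dma_map(&range, dev, daddrs, true);
 *	if (ret != -EAGAIN)	// -EAGAIN means mmap_sem was already dropped
 *		up_read(&mm->mmap_sem);
 *	// on success ret is the number of pages mapped; undo later with
 *	// hmm_range_dma_unmap(&range, NULL, dev, daddrs, dirty)
 */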
/*
 * hmm_range_dma_unmap() - unmap a range that was mapped with
 * hmm_range_dma_map()
 * @range: range being unmapped
 * @vma: the vma against which the range was mapped (optional)
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
 * Returns: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by the mmu notifier or use an HMM mirror
 * and abide by the sync_cpu_device_pagetables() callback so that it is safe
 * here to call set_page_dirty(). The caller must also take appropriate locks
 * to prevent concurrent mmu notifier or sync_cpu_device_pagetables() from
 * making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in function description on why it is
			 * safe here to call set_page_dirty()
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_vma_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}
static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}
/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical addresses big enough
 * to contain the new resource, and then hotplugs it as ZONE_DEVICE memory,
 * which in turn allocates struct pages. It does not do anything beyond that;
 * all events affecting the memory will go through the various callbacks
 * provided by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization
 * and is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;
	devmem->pagemap.cleanup = hmm_devmem_ref_exit;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
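
/*
 * Illustrative sketch (not from the original file): a driver hotplugs its
 * device memory once at probe time and keeps the returned hmm_devmem around
 * for the life of the device. my_devmem_ops, my_devmem_fault, my_devmem_free
 * and MY_DEVMEM_SIZE are hypothetical.
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.free	= my_devmem_free,
 *		.fault	= my_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, MY_DEVMEM_SIZE);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// pages in [devmem->pfn_first, devmem->pfn_last) now back device memory
 */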
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;
	devmem->pagemap.cleanup = hmm_devmem_ref_exit;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;
static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}
struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);
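
/*
 * Illustrative sketch (not from the original file): hmm_device is only a
 * convenience for drivers that want one fake struct device to hang several
 * memory regions from; my_driver_data is a hypothetical per-driver pointer
 * later retrievable with dev_get_drvdata().
 *
 *	hmm_device = hmm_device_new(my_driver_data);
 *	if (IS_ERR(hmm_device))
 *		return PTR_ERR(hmm_device);
 *	// use &hmm_device->device as the device argument of hmm_devmem_add()
 *	hmm_device_put(hmm_device);
 */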
void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */