1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
5 #include <linux/spinlock.h>
8 #include <linux/memremap.h>
9 #include <linux/pagemap.h>
10 #include <linux/rmap.h>
11 #include <linux/swap.h>
12 #include <linux/swapops.h>
13 #include <linux/secretmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/rwsem.h>
17 #include <linux/hugetlb.h>
18 #include <linux/migrate.h>
19 #include <linux/mm_inline.h>
20 #include <linux/sched/mm.h>
22 #include <asm/mmu_context.h>
23 #include <asm/tlbflush.h>
27 struct follow_page_context
{
28 struct dev_pagemap
*pgmap
;
29 unsigned int page_mask
;
32 static inline void sanity_check_pinned_pages(struct page
**pages
,
35 if (!IS_ENABLED(CONFIG_DEBUG_VM
))
39 * We only pin anonymous pages if they are exclusive. Once pinned, we
40 * can no longer turn them possibly shared and PageAnonExclusive() will
41 * stick around until the page is freed.
43 * We'd like to verify that our pinned anonymous pages are still mapped
44 * exclusively. The issue with anon THP is that we don't know how
45 * they are/were mapped when pinning them. However, for anon
46 * THP we can assume that either the given page (PTE-mapped THP) or
47 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
48 * neither is the case, there is certainly something wrong.
50 for (; npages
; npages
--, pages
++) {
51 struct page
*page
= *pages
;
52 struct folio
*folio
= page_folio(page
);
54 if (!folio_test_anon(folio
))
56 if (!folio_test_large(folio
) || folio_test_hugetlb(folio
))
57 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio
->page
), page
);
59 /* Either a PTE-mapped or a PMD-mapped THP. */
60 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio
->page
) &&
61 !PageAnonExclusive(page
), page
);
66 * Return the folio with ref appropriately incremented,
67 * or NULL if that failed.
69 static inline struct folio
*try_get_folio(struct page
*page
, int refs
)
74 folio
= page_folio(page
);
75 if (WARN_ON_ONCE(folio_ref_count(folio
) < 0))
77 if (unlikely(!folio_ref_try_add_rcu(folio
, refs
)))
81 * At this point we have a stable reference to the folio; but it
82 * could be that between calling page_folio() and the refcount
83 * increment, the folio was split, in which case we'd end up
84 * holding a reference on a folio that has nothing to do with the page
85 * we were given anymore.
86 * So now that the folio is stable, recheck that the page still
87 * belongs to this folio.
89 if (unlikely(page_folio(page
) != folio
)) {
90 if (!put_devmap_managed_page_refs(&folio
->page
, refs
))
91 folio_put_refs(folio
, refs
);
99 * try_grab_folio() - Attempt to get or pin a folio.
100 * @page: pointer to page to be grabbed
101 * @refs: the value to (effectively) add to the folio's refcount
102 * @flags: gup flags: these are the FOLL_* flag values.
104 * "grab" names in this file mean, "look at flags to decide whether to use
105 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
107 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
108 * same time. (That's true throughout the get_user_pages*() and
109 * pin_user_pages*() APIs.) Cases:
111 * FOLL_GET: folio's refcount will be incremented by @refs.
113 * FOLL_PIN on large folios: folio's refcount will be incremented by
114 * @refs, and its compound_pincount will be incremented by @refs.
116 * FOLL_PIN on single-page folios: folio's refcount will be incremented by
117 * @refs * GUP_PIN_COUNTING_BIAS.
119 * Return: The folio containing @page (with refcount appropriately
120 * incremented) for success, or NULL upon failure. If neither FOLL_GET
121 * nor FOLL_PIN was set, that's considered failure, and furthermore,
122 * a likely bug in the caller, so a warning is also emitted.
124 struct folio
*try_grab_folio(struct page
*page
, int refs
, unsigned int flags
)
126 if (flags
& FOLL_GET
)
127 return try_get_folio(page
, refs
);
128 else if (flags
& FOLL_PIN
) {
132 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
133 * right zone, so fail and let the caller fall back to the slow
136 if (unlikely((flags
& FOLL_LONGTERM
) &&
137 !is_longterm_pinnable_page(page
)))
141 * CAUTION: Don't use compound_head() on the page before this
142 * point, the result won't be stable.
144 folio
= try_get_folio(page
, refs
);
149 * When pinning a large folio, use an exact count to track it.
151 * However, be sure to *also* increment the normal folio
152 * refcount field at least once, so that the folio really
153 * is pinned. That's why the refcount from the earlier
154 * try_get_folio() is left intact.
156 if (folio_test_large(folio
))
157 atomic_add(refs
, folio_pincount_ptr(folio
));
160 refs
* (GUP_PIN_COUNTING_BIAS
- 1));
161 node_stat_mod_folio(folio
, NR_FOLL_PIN_ACQUIRED
, refs
);
170 static void gup_put_folio(struct folio
*folio
, int refs
, unsigned int flags
)
172 if (flags
& FOLL_PIN
) {
173 node_stat_mod_folio(folio
, NR_FOLL_PIN_RELEASED
, refs
);
174 if (folio_test_large(folio
))
175 atomic_sub(refs
, folio_pincount_ptr(folio
));
177 refs
*= GUP_PIN_COUNTING_BIAS
;
180 if (!put_devmap_managed_page_refs(&folio
->page
, refs
))
181 folio_put_refs(folio
, refs
);
185 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
186 * @page: pointer to page to be grabbed
187 * @flags: gup flags: these are the FOLL_* flag values.
189 * This might not do anything at all, depending on the flags argument.
191 * "grab" names in this file mean, "look at flags to decide whether to use
192 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
194 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
195 * time. Cases: please see the try_grab_folio() documentation, with
198 * Return: true for success, or if no action was required (if neither FOLL_PIN
199 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
200 * FOLL_PIN was set, but the page could not be grabbed.
202 bool __must_check
try_grab_page(struct page
*page
, unsigned int flags
)
204 struct folio
*folio
= page_folio(page
);
206 WARN_ON_ONCE((flags
& (FOLL_GET
| FOLL_PIN
)) == (FOLL_GET
| FOLL_PIN
));
207 if (WARN_ON_ONCE(folio_ref_count(folio
) <= 0))
210 if (flags
& FOLL_GET
)
211 folio_ref_inc(folio
);
212 else if (flags
& FOLL_PIN
) {
214 * Similar to try_grab_folio(): be sure to *also*
215 * increment the normal page refcount field at least once,
216 * so that the page really is pinned.
218 if (folio_test_large(folio
)) {
219 folio_ref_add(folio
, 1);
220 atomic_add(1, folio_pincount_ptr(folio
));
222 folio_ref_add(folio
, GUP_PIN_COUNTING_BIAS
);
225 node_stat_mod_folio(folio
, NR_FOLL_PIN_ACQUIRED
, 1);
232 * unpin_user_page() - release a dma-pinned page
233 * @page: pointer to page to be released
235 * Pages that were pinned via pin_user_pages*() must be released via either
236 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
237 * that such pages can be separately tracked and uniquely handled. In
238 * particular, interactions with RDMA and filesystems need special handling.
240 void unpin_user_page(struct page
*page
)
242 sanity_check_pinned_pages(&page
, 1);
243 gup_put_folio(page_folio(page
), 1, FOLL_PIN
);
245 EXPORT_SYMBOL(unpin_user_page
);
247 static inline struct folio
*gup_folio_range_next(struct page
*start
,
248 unsigned long npages
, unsigned long i
, unsigned int *ntails
)
250 struct page
*next
= nth_page(start
, i
);
251 struct folio
*folio
= page_folio(next
);
254 if (folio_test_large(folio
))
255 nr
= min_t(unsigned int, npages
- i
,
256 folio_nr_pages(folio
) - folio_page_idx(folio
, next
));
262 static inline struct folio
*gup_folio_next(struct page
**list
,
263 unsigned long npages
, unsigned long i
, unsigned int *ntails
)
265 struct folio
*folio
= page_folio(list
[i
]);
268 for (nr
= i
+ 1; nr
< npages
; nr
++) {
269 if (page_folio(list
[nr
]) != folio
)
278 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
279 * @pages: array of pages to be maybe marked dirty, and definitely released.
280 * @npages: number of pages in the @pages array.
281 * @make_dirty: whether to mark the pages dirty
283 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
284 * variants called on that page.
286 * For each page in the @pages array, make that page (or its head page, if a
287 * compound page) dirty, if @make_dirty is true, and if the page was previously
288 * listed as clean. In any case, releases all pages using unpin_user_page(),
289 * possibly via unpin_user_pages(), for the non-dirty case.
291 * Please see the unpin_user_page() documentation for details.
293 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
294 * required, then the caller should a) verify that this is really correct,
295 * because _lock() is usually required, and b) hand code it:
296 * set_page_dirty_lock(), unpin_user_page().
299 void unpin_user_pages_dirty_lock(struct page
**pages
, unsigned long npages
,
307 unpin_user_pages(pages
, npages
);
311 sanity_check_pinned_pages(pages
, npages
);
312 for (i
= 0; i
< npages
; i
+= nr
) {
313 folio
= gup_folio_next(pages
, npages
, i
, &nr
);
315 * Checking PageDirty at this point may race with
316 * clear_page_dirty_for_io(), but that's OK. Two key
319 * 1) This code sees the page as already dirty, so it
320 * skips the call to set_page_dirty(). That could happen
321 * because clear_page_dirty_for_io() called
322 * page_mkclean(), followed by set_page_dirty().
323 * However, now the page is going to get written back,
324 * which meets the original intention of setting it
325 * dirty, so all is well: clear_page_dirty_for_io() goes
326 * on to call TestClearPageDirty(), and write the page
329 * 2) This code sees the page as clean, so it calls
330 * set_page_dirty(). The page stays dirty, despite being
331 * written back, so it gets written back again in the
332 * next writeback cycle. This is harmless.
334 if (!folio_test_dirty(folio
)) {
336 folio_mark_dirty(folio
);
339 gup_put_folio(folio
, nr
, FOLL_PIN
);
342 EXPORT_SYMBOL(unpin_user_pages_dirty_lock
);
345 * unpin_user_page_range_dirty_lock() - release and optionally dirty
346 * gup-pinned page range
348 * @page: the starting page of a range maybe marked dirty, and definitely released.
349 * @npages: number of consecutive pages to release.
350 * @make_dirty: whether to mark the pages dirty
352 * "gup-pinned page range" refers to a range of pages that has had one of the
353 * pin_user_pages() variants called on that page.
355 * For the page ranges defined by [page .. page+npages], make that range (or
356 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
357 * page range was previously listed as clean.
359 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
360 * required, then the caller should a) verify that this is really correct,
361 * because _lock() is usually required, and b) hand code it:
362 * set_page_dirty_lock(), unpin_user_page().
365 void unpin_user_page_range_dirty_lock(struct page
*page
, unsigned long npages
,
372 for (i
= 0; i
< npages
; i
+= nr
) {
373 folio
= gup_folio_range_next(page
, npages
, i
, &nr
);
374 if (make_dirty
&& !folio_test_dirty(folio
)) {
376 folio_mark_dirty(folio
);
379 gup_put_folio(folio
, nr
, FOLL_PIN
);
382 EXPORT_SYMBOL(unpin_user_page_range_dirty_lock
);
384 static void unpin_user_pages_lockless(struct page
**pages
, unsigned long npages
)
391 * Don't perform any sanity checks because we might have raced with
392 * fork() and some anonymous pages might now actually be shared --
393 * which is why we're unpinning after all.
395 for (i
= 0; i
< npages
; i
+= nr
) {
396 folio
= gup_folio_next(pages
, npages
, i
, &nr
);
397 gup_put_folio(folio
, nr
, FOLL_PIN
);
402 * unpin_user_pages() - release an array of gup-pinned pages.
403 * @pages: array of pages to be marked dirty and released.
404 * @npages: number of pages in the @pages array.
406 * For each page in the @pages array, release the page using unpin_user_page().
408 * Please see the unpin_user_page() documentation for details.
410 void unpin_user_pages(struct page
**pages
, unsigned long npages
)
417 * If this WARN_ON() fires, then the system *might* be leaking pages (by
418 * leaving them pinned), but probably not. More likely, gup/pup returned
419 * a hard -ERRNO error to the caller, who erroneously passed it here.
421 if (WARN_ON(IS_ERR_VALUE(npages
)))
424 sanity_check_pinned_pages(pages
, npages
);
425 for (i
= 0; i
< npages
; i
+= nr
) {
426 folio
= gup_folio_next(pages
, npages
, i
, &nr
);
427 gup_put_folio(folio
, nr
, FOLL_PIN
);
430 EXPORT_SYMBOL(unpin_user_pages
);
433 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
434 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
435 * cache bouncing on large SMP machines for concurrent pinned gups.
437 static inline void mm_set_has_pinned_flag(unsigned long *mm_flags
)
439 if (!test_bit(MMF_HAS_PINNED
, mm_flags
))
440 set_bit(MMF_HAS_PINNED
, mm_flags
);
444 static struct page
*no_page_table(struct vm_area_struct
*vma
,
448 * When core dumping an enormous anonymous area that nobody
449 * has touched so far, we don't want to allocate unnecessary pages or
450 * page tables. Return error instead of NULL to skip handle_mm_fault,
451 * then get_dump_page() will return NULL to leave a hole in the dump.
452 * But we can only make this optimization where a hole would surely
453 * be zero-filled if handle_mm_fault() actually did handle it.
455 if ((flags
& FOLL_DUMP
) &&
456 (vma_is_anonymous(vma
) || !vma
->vm_ops
->fault
))
457 return ERR_PTR(-EFAULT
);
461 static int follow_pfn_pte(struct vm_area_struct
*vma
, unsigned long address
,
462 pte_t
*pte
, unsigned int flags
)
464 if (flags
& FOLL_TOUCH
) {
467 if (flags
& FOLL_WRITE
)
468 entry
= pte_mkdirty(entry
);
469 entry
= pte_mkyoung(entry
);
471 if (!pte_same(*pte
, entry
)) {
472 set_pte_at(vma
->vm_mm
, address
, pte
, entry
);
473 update_mmu_cache(vma
, address
, pte
);
477 /* Proper page table entry exists, but no corresponding struct page */
481 /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
482 static inline bool can_follow_write_pte(pte_t pte
, struct page
*page
,
483 struct vm_area_struct
*vma
,
486 /* If the pte is writable, we can write to the page. */
490 /* Maybe FOLL_FORCE is set to override it? */
491 if (!(flags
& FOLL_FORCE
))
494 /* But FOLL_FORCE has no effect on shared mappings */
495 if (vma
->vm_flags
& (VM_MAYSHARE
| VM_SHARED
))
498 /* ... or read-only private ones */
499 if (!(vma
->vm_flags
& VM_MAYWRITE
))
502 /* ... or already writable ones that just need to take a write fault */
503 if (vma
->vm_flags
& VM_WRITE
)
507 * See can_change_pte_writable(): we broke COW and could map the page
508 * writable if we have an exclusive anonymous page ...
510 if (!page
|| !PageAnon(page
) || !PageAnonExclusive(page
))
513 /* ... and a write-fault isn't required for other reasons. */
514 if (vma_soft_dirty_enabled(vma
) && !pte_soft_dirty(pte
))
516 return !userfaultfd_pte_wp(vma
, pte
);
519 static struct page
*follow_page_pte(struct vm_area_struct
*vma
,
520 unsigned long address
, pmd_t
*pmd
, unsigned int flags
,
521 struct dev_pagemap
**pgmap
)
523 struct mm_struct
*mm
= vma
->vm_mm
;
529 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
530 if (WARN_ON_ONCE((flags
& (FOLL_PIN
| FOLL_GET
)) ==
531 (FOLL_PIN
| FOLL_GET
)))
532 return ERR_PTR(-EINVAL
);
534 if (unlikely(pmd_bad(*pmd
)))
535 return no_page_table(vma
, flags
);
537 ptep
= pte_offset_map_lock(mm
, pmd
, address
, &ptl
);
539 if (!pte_present(pte
)) {
542 * KSM's break_ksm() relies upon recognizing a ksm page
543 * even while it is being migrated, so for that case we
544 * need migration_entry_wait().
546 if (likely(!(flags
& FOLL_MIGRATION
)))
550 entry
= pte_to_swp_entry(pte
);
551 if (!is_migration_entry(entry
))
553 pte_unmap_unlock(ptep
, ptl
);
554 migration_entry_wait(mm
, pmd
, address
);
557 if ((flags
& FOLL_NUMA
) && pte_protnone(pte
))
560 page
= vm_normal_page(vma
, address
, pte
);
563 * We only care about anon pages in can_follow_write_pte() and don't
564 * have to worry about pte_devmap() because they are never anon.
566 if ((flags
& FOLL_WRITE
) &&
567 !can_follow_write_pte(pte
, page
, vma
, flags
)) {
572 if (!page
&& pte_devmap(pte
) && (flags
& (FOLL_GET
| FOLL_PIN
))) {
574 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
575 * case since they are only valid while holding the pgmap
578 *pgmap
= get_dev_pagemap(pte_pfn(pte
), *pgmap
);
580 page
= pte_page(pte
);
583 } else if (unlikely(!page
)) {
584 if (flags
& FOLL_DUMP
) {
585 /* Avoid special (like zero) pages in core dumps */
586 page
= ERR_PTR(-EFAULT
);
590 if (is_zero_pfn(pte_pfn(pte
))) {
591 page
= pte_page(pte
);
593 ret
= follow_pfn_pte(vma
, address
, ptep
, flags
);
599 if (!pte_write(pte
) && gup_must_unshare(flags
, page
)) {
600 page
= ERR_PTR(-EMLINK
);
604 VM_BUG_ON_PAGE((flags
& FOLL_PIN
) && PageAnon(page
) &&
605 !PageAnonExclusive(page
), page
);
607 /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
608 if (unlikely(!try_grab_page(page
, flags
))) {
609 page
= ERR_PTR(-ENOMEM
);
613 * We need to make the page accessible if and only if we are going
614 * to access its content (the FOLL_PIN case). Please see
615 * Documentation/core-api/pin_user_pages.rst for details.
617 if (flags
& FOLL_PIN
) {
618 ret
= arch_make_page_accessible(page
);
620 unpin_user_page(page
);
625 if (flags
& FOLL_TOUCH
) {
626 if ((flags
& FOLL_WRITE
) &&
627 !pte_dirty(pte
) && !PageDirty(page
))
628 set_page_dirty(page
);
630 * pte_mkyoung() would be more correct here, but atomic care
631 * is needed to avoid losing the dirty bit: it is easier to use
632 * mark_page_accessed().
634 mark_page_accessed(page
);
637 pte_unmap_unlock(ptep
, ptl
);
640 pte_unmap_unlock(ptep
, ptl
);
643 return no_page_table(vma
, flags
);
646 static struct page
*follow_pmd_mask(struct vm_area_struct
*vma
,
647 unsigned long address
, pud_t
*pudp
,
649 struct follow_page_context
*ctx
)
654 struct mm_struct
*mm
= vma
->vm_mm
;
656 pmd
= pmd_offset(pudp
, address
);
658 * The READ_ONCE() will stabilize the pmdval in a register or
659 * on the stack so that it will stop changing under the code.
661 pmdval
= READ_ONCE(*pmd
);
662 if (pmd_none(pmdval
))
663 return no_page_table(vma
, flags
);
664 if (pmd_huge(pmdval
) && is_vm_hugetlb_page(vma
)) {
665 page
= follow_huge_pmd(mm
, address
, pmd
, flags
);
668 return no_page_table(vma
, flags
);
670 if (is_hugepd(__hugepd(pmd_val(pmdval
)))) {
671 page
= follow_huge_pd(vma
, address
,
672 __hugepd(pmd_val(pmdval
)), flags
,
676 return no_page_table(vma
, flags
);
679 if (!pmd_present(pmdval
)) {
681 * Should never reach here, if thp migration is not supported;
682 * Otherwise, it must be a thp migration entry.
684 VM_BUG_ON(!thp_migration_supported() ||
685 !is_pmd_migration_entry(pmdval
));
687 if (likely(!(flags
& FOLL_MIGRATION
)))
688 return no_page_table(vma
, flags
);
690 pmd_migration_entry_wait(mm
, pmd
);
691 pmdval
= READ_ONCE(*pmd
);
693 * MADV_DONTNEED may convert the pmd to null because
694 * mmap_lock is held in read mode
696 if (pmd_none(pmdval
))
697 return no_page_table(vma
, flags
);
700 if (pmd_devmap(pmdval
)) {
701 ptl
= pmd_lock(mm
, pmd
);
702 page
= follow_devmap_pmd(vma
, address
, pmd
, flags
, &ctx
->pgmap
);
707 if (likely(!pmd_trans_huge(pmdval
)))
708 return follow_page_pte(vma
, address
, pmd
, flags
, &ctx
->pgmap
);
710 if ((flags
& FOLL_NUMA
) && pmd_protnone(pmdval
))
711 return no_page_table(vma
, flags
);
714 ptl
= pmd_lock(mm
, pmd
);
715 if (unlikely(pmd_none(*pmd
))) {
717 return no_page_table(vma
, flags
);
719 if (unlikely(!pmd_present(*pmd
))) {
721 if (likely(!(flags
& FOLL_MIGRATION
)))
722 return no_page_table(vma
, flags
);
723 pmd_migration_entry_wait(mm
, pmd
);
726 if (unlikely(!pmd_trans_huge(*pmd
))) {
728 return follow_page_pte(vma
, address
, pmd
, flags
, &ctx
->pgmap
);
730 if (flags
& FOLL_SPLIT_PMD
) {
732 page
= pmd_page(*pmd
);
733 if (is_huge_zero_page(page
)) {
736 split_huge_pmd(vma
, pmd
, address
);
737 if (pmd_trans_unstable(pmd
))
741 split_huge_pmd(vma
, pmd
, address
);
742 ret
= pte_alloc(mm
, pmd
) ? -ENOMEM
: 0;
745 return ret
? ERR_PTR(ret
) :
746 follow_page_pte(vma
, address
, pmd
, flags
, &ctx
->pgmap
);
748 page
= follow_trans_huge_pmd(vma
, address
, pmd
, flags
);
750 ctx
->page_mask
= HPAGE_PMD_NR
- 1;
754 static struct page
*follow_pud_mask(struct vm_area_struct
*vma
,
755 unsigned long address
, p4d_t
*p4dp
,
757 struct follow_page_context
*ctx
)
762 struct mm_struct
*mm
= vma
->vm_mm
;
764 pud
= pud_offset(p4dp
, address
);
766 return no_page_table(vma
, flags
);
767 if (pud_huge(*pud
) && is_vm_hugetlb_page(vma
)) {
768 page
= follow_huge_pud(mm
, address
, pud
, flags
);
771 return no_page_table(vma
, flags
);
773 if (is_hugepd(__hugepd(pud_val(*pud
)))) {
774 page
= follow_huge_pd(vma
, address
,
775 __hugepd(pud_val(*pud
)), flags
,
779 return no_page_table(vma
, flags
);
781 if (pud_devmap(*pud
)) {
782 ptl
= pud_lock(mm
, pud
);
783 page
= follow_devmap_pud(vma
, address
, pud
, flags
, &ctx
->pgmap
);
788 if (unlikely(pud_bad(*pud
)))
789 return no_page_table(vma
, flags
);
791 return follow_pmd_mask(vma
, address
, pud
, flags
, ctx
);
794 static struct page
*follow_p4d_mask(struct vm_area_struct
*vma
,
795 unsigned long address
, pgd_t
*pgdp
,
797 struct follow_page_context
*ctx
)
802 p4d
= p4d_offset(pgdp
, address
);
804 return no_page_table(vma
, flags
);
805 BUILD_BUG_ON(p4d_huge(*p4d
));
806 if (unlikely(p4d_bad(*p4d
)))
807 return no_page_table(vma
, flags
);
809 if (is_hugepd(__hugepd(p4d_val(*p4d
)))) {
810 page
= follow_huge_pd(vma
, address
,
811 __hugepd(p4d_val(*p4d
)), flags
,
815 return no_page_table(vma
, flags
);
817 return follow_pud_mask(vma
, address
, p4d
, flags
, ctx
);
821 * follow_page_mask - look up a page descriptor from a user-virtual address
822 * @vma: vm_area_struct mapping @address
823 * @address: virtual address to look up
824 * @flags: flags modifying lookup behaviour
825 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
826 * pointer to output page_mask
828 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
830 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
831 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
833 * When getting an anonymous page and the caller has to trigger unsharing
834 * of a shared anonymous page first, -EMLINK is returned. The caller should
835 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
836 * relevant with FOLL_PIN and !FOLL_WRITE.
838 * On output, the @ctx->page_mask is set according to the size of the page.
840 * Return: the mapped (struct page *), %NULL if no mapping exists, or
841 * an error pointer if there is a mapping to something not represented
842 * by a page descriptor (see also vm_normal_page()).
844 static struct page
*follow_page_mask(struct vm_area_struct
*vma
,
845 unsigned long address
, unsigned int flags
,
846 struct follow_page_context
*ctx
)
850 struct mm_struct
*mm
= vma
->vm_mm
;
854 /* make this handle hugepd */
855 page
= follow_huge_addr(mm
, address
, flags
& FOLL_WRITE
);
857 WARN_ON_ONCE(flags
& (FOLL_GET
| FOLL_PIN
));
861 pgd
= pgd_offset(mm
, address
);
863 if (pgd_none(*pgd
) || unlikely(pgd_bad(*pgd
)))
864 return no_page_table(vma
, flags
);
866 if (pgd_huge(*pgd
)) {
867 page
= follow_huge_pgd(mm
, address
, pgd
, flags
);
870 return no_page_table(vma
, flags
);
872 if (is_hugepd(__hugepd(pgd_val(*pgd
)))) {
873 page
= follow_huge_pd(vma
, address
,
874 __hugepd(pgd_val(*pgd
)), flags
,
878 return no_page_table(vma
, flags
);
881 return follow_p4d_mask(vma
, address
, pgd
, flags
, ctx
);
884 struct page
*follow_page(struct vm_area_struct
*vma
, unsigned long address
,
885 unsigned int foll_flags
)
887 struct follow_page_context ctx
= { NULL
};
890 if (vma_is_secretmem(vma
))
893 if (foll_flags
& FOLL_PIN
)
896 page
= follow_page_mask(vma
, address
, foll_flags
, &ctx
);
898 put_dev_pagemap(ctx
.pgmap
);
902 static int get_gate_page(struct mm_struct
*mm
, unsigned long address
,
903 unsigned int gup_flags
, struct vm_area_struct
**vma
,
913 /* user gate pages are read-only */
914 if (gup_flags
& FOLL_WRITE
)
916 if (address
> TASK_SIZE
)
917 pgd
= pgd_offset_k(address
);
919 pgd
= pgd_offset_gate(mm
, address
);
922 p4d
= p4d_offset(pgd
, address
);
925 pud
= pud_offset(p4d
, address
);
928 pmd
= pmd_offset(pud
, address
);
929 if (!pmd_present(*pmd
))
931 VM_BUG_ON(pmd_trans_huge(*pmd
));
932 pte
= pte_offset_map(pmd
, address
);
935 *vma
= get_gate_vma(mm
);
938 *page
= vm_normal_page(*vma
, address
, *pte
);
940 if ((gup_flags
& FOLL_DUMP
) || !is_zero_pfn(pte_pfn(*pte
)))
942 *page
= pte_page(*pte
);
944 if (unlikely(!try_grab_page(*page
, gup_flags
))) {
956 * mmap_lock must be held on entry. If @locked != NULL and *@flags
957 * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
958 * is, *@locked will be set to 0 and -EBUSY returned.
960 static int faultin_page(struct vm_area_struct
*vma
,
961 unsigned long address
, unsigned int *flags
, bool unshare
,
964 unsigned int fault_flags
= 0;
967 if (*flags
& FOLL_NOFAULT
)
969 if (*flags
& FOLL_WRITE
)
970 fault_flags
|= FAULT_FLAG_WRITE
;
971 if (*flags
& FOLL_REMOTE
)
972 fault_flags
|= FAULT_FLAG_REMOTE
;
974 fault_flags
|= FAULT_FLAG_ALLOW_RETRY
| FAULT_FLAG_KILLABLE
;
975 if (*flags
& FOLL_NOWAIT
)
976 fault_flags
|= FAULT_FLAG_ALLOW_RETRY
| FAULT_FLAG_RETRY_NOWAIT
;
977 if (*flags
& FOLL_TRIED
) {
979 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
982 fault_flags
|= FAULT_FLAG_TRIED
;
985 fault_flags
|= FAULT_FLAG_UNSHARE
;
986 /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
987 VM_BUG_ON(fault_flags
& FAULT_FLAG_WRITE
);
990 ret
= handle_mm_fault(vma
, address
, fault_flags
, NULL
);
992 if (ret
& VM_FAULT_COMPLETED
) {
994 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
995 * mmap lock in the page fault handler. Sanity check this.
997 WARN_ON_ONCE(fault_flags
& FAULT_FLAG_RETRY_NOWAIT
);
1001 * We should do the same as VM_FAULT_RETRY, but let's not
1002 * return -EBUSY since that's not reflecting the reality of
1003 * what has happened - we've just fully completed a page
1004 * fault, with the mmap lock released. Use -EAGAIN to show
1005 * that we want to take the mmap lock _again_.
1010 if (ret
& VM_FAULT_ERROR
) {
1011 int err
= vm_fault_to_errno(ret
, *flags
);
1018 if (ret
& VM_FAULT_RETRY
) {
1019 if (locked
&& !(fault_flags
& FAULT_FLAG_RETRY_NOWAIT
))
1027 static int check_vma_flags(struct vm_area_struct
*vma
, unsigned long gup_flags
)
1029 vm_flags_t vm_flags
= vma
->vm_flags
;
1030 int write
= (gup_flags
& FOLL_WRITE
);
1031 int foreign
= (gup_flags
& FOLL_REMOTE
);
1033 if (vm_flags
& (VM_IO
| VM_PFNMAP
))
1036 if (gup_flags
& FOLL_ANON
&& !vma_is_anonymous(vma
))
1039 if ((gup_flags
& FOLL_LONGTERM
) && vma_is_fsdax(vma
))
1042 if (vma_is_secretmem(vma
))
1046 if (!(vm_flags
& VM_WRITE
)) {
1047 if (!(gup_flags
& FOLL_FORCE
))
1050 * We used to let the write,force case do COW in a
1051 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
1052 * set a breakpoint in a read-only mapping of an
1053 * executable, without corrupting the file (yet only
1054 * when that file had been opened for writing!).
1055 * Anon pages in shared mappings are surprising: now
1058 if (!is_cow_mapping(vm_flags
))
1061 } else if (!(vm_flags
& VM_READ
)) {
1062 if (!(gup_flags
& FOLL_FORCE
))
1065 * Is there actually any vma we can reach here which does not
1066 * have VM_MAYREAD set?
1068 if (!(vm_flags
& VM_MAYREAD
))
1072 * gups are always data accesses, not instruction
1073 * fetches, so execute=false here
1075 if (!arch_vma_access_permitted(vma
, write
, false, foreign
))
1081 * __get_user_pages() - pin user pages in memory
1082 * @mm: mm_struct of target mm
1083 * @start: starting user address
1084 * @nr_pages: number of pages from start to pin
1085 * @gup_flags: flags modifying pin behaviour
1086 * @pages: array that receives pointers to the pages pinned.
1087 * Should be at least nr_pages long. Or NULL, if caller
1088 * only intends to ensure the pages are faulted in.
1089 * @vmas: array of pointers to vmas corresponding to each page.
1090 * Or NULL if the caller does not require them.
1091 * @locked: whether we're still with the mmap_lock held
1093 * Returns either number of pages pinned (which may be less than the
1094 * number requested), or an error. Details about the return value:
1096 * -- If nr_pages is 0, returns 0.
1097 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1098 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1099 * pages pinned. Again, this may be less than nr_pages.
1100 * -- 0 return value is possible when the fault would need to be retried.
1102 * The caller is responsible for releasing returned @pages, via put_page().
1104 * @vmas are valid only as long as mmap_lock is held.
1106 * Must be called with mmap_lock held. It may be released. See below.
1108 * __get_user_pages walks a process's page tables and takes a reference to
1109 * each struct page that each user address corresponds to at a given
1110 * instant. That is, it takes the page that would be accessed if a user
1111 * thread accesses the given user virtual address at that instant.
1113 * This does not guarantee that the page exists in the user mappings when
1114 * __get_user_pages returns, and there may even be a completely different
1115 * page there in some cases (eg. if mmapped pagecache has been invalidated
1116 * and subsequently re faulted). However it does guarantee that the page
1117 * won't be freed completely. And mostly callers simply care that the page
1118 * contains data that was valid *at some point in time*. Typically, an IO
1119 * or similar operation cannot guarantee anything stronger anyway because
1120 * locks can't be held over the syscall boundary.
1122 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1123 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1124 * appropriate) must be called after the page is finished with, and
1125 * before put_page is called.
1127 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
1128 * released by an up_read(). That can happen if @gup_flags does not
1131 * A caller using such a combination of @locked and @gup_flags
1132 * must therefore hold the mmap_lock for reading only, and recognize
1133 * when it's been released. Otherwise, it must be held for either
1134 * reading or writing and will not be released.
1136 * In most cases, get_user_pages or get_user_pages_fast should be used
1137 * instead of __get_user_pages. __get_user_pages should be used only if
1138 * you need some special @gup_flags.
1140 static long __get_user_pages(struct mm_struct
*mm
,
1141 unsigned long start
, unsigned long nr_pages
,
1142 unsigned int gup_flags
, struct page
**pages
,
1143 struct vm_area_struct
**vmas
, int *locked
)
1145 long ret
= 0, i
= 0;
1146 struct vm_area_struct
*vma
= NULL
;
1147 struct follow_page_context ctx
= { NULL
};
1152 start
= untagged_addr(start
);
1154 VM_BUG_ON(!!pages
!= !!(gup_flags
& (FOLL_GET
| FOLL_PIN
)));
1157 * If FOLL_FORCE is set then do not force a full fault as the hinting
1158 * fault information is unrelated to the reference behaviour of a task
1159 * using the address space
1161 if (!(gup_flags
& FOLL_FORCE
))
1162 gup_flags
|= FOLL_NUMA
;
1166 unsigned int foll_flags
= gup_flags
;
1167 unsigned int page_increm
;
1169 /* first iteration or cross vma bound */
1170 if (!vma
|| start
>= vma
->vm_end
) {
1171 vma
= find_extend_vma(mm
, start
);
1172 if (!vma
&& in_gate_area(mm
, start
)) {
1173 ret
= get_gate_page(mm
, start
& PAGE_MASK
,
1175 pages
? &pages
[i
] : NULL
);
1186 ret
= check_vma_flags(vma
, gup_flags
);
1190 if (is_vm_hugetlb_page(vma
)) {
1191 i
= follow_hugetlb_page(mm
, vma
, pages
, vmas
,
1192 &start
, &nr_pages
, i
,
1194 if (locked
&& *locked
== 0) {
1196 * We've got a VM_FAULT_RETRY
1197 * and we've lost mmap_lock.
1198 * We must stop here.
1200 BUG_ON(gup_flags
& FOLL_NOWAIT
);
1208 * If we have a pending SIGKILL, don't keep faulting pages and
1209 * potentially allocating memory.
1211 if (fatal_signal_pending(current
)) {
1217 page
= follow_page_mask(vma
, start
, foll_flags
, &ctx
);
1218 if (!page
|| PTR_ERR(page
) == -EMLINK
) {
1219 ret
= faultin_page(vma
, start
, &foll_flags
,
1220 PTR_ERR(page
) == -EMLINK
, locked
);
1234 } else if (PTR_ERR(page
) == -EEXIST
) {
1236 * Proper page table entry exists, but no corresponding
1237 * struct page. If the caller expects **pages to be
1238 * filled in, bail out now, because that can't be done
1242 ret
= PTR_ERR(page
);
1247 } else if (IS_ERR(page
)) {
1248 ret
= PTR_ERR(page
);
1253 flush_anon_page(vma
, page
, start
);
1254 flush_dcache_page(page
);
1262 page_increm
= 1 + (~(start
>> PAGE_SHIFT
) & ctx
.page_mask
);
1263 if (page_increm
> nr_pages
)
1264 page_increm
= nr_pages
;
1266 start
+= page_increm
* PAGE_SIZE
;
1267 nr_pages
-= page_increm
;
1271 put_dev_pagemap(ctx
.pgmap
);
1275 static bool vma_permits_fault(struct vm_area_struct
*vma
,
1276 unsigned int fault_flags
)
1278 bool write
= !!(fault_flags
& FAULT_FLAG_WRITE
);
1279 bool foreign
= !!(fault_flags
& FAULT_FLAG_REMOTE
);
1280 vm_flags_t vm_flags
= write
? VM_WRITE
: VM_READ
;
1282 if (!(vm_flags
& vma
->vm_flags
))
1286 * The architecture might have a hardware protection
1287 * mechanism other than read/write that can deny access.
1289 * gup always represents data access, not instruction
1290 * fetches, so execute=false here:
1292 if (!arch_vma_access_permitted(vma
, write
, false, foreign
))
1299 * fixup_user_fault() - manually resolve a user page fault
1300 * @mm: mm_struct of target mm
1301 * @address: user address
1302 * @fault_flags:flags to pass down to handle_mm_fault()
1303 * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
1304 * does not allow retry. If NULL, the caller must guarantee
1305 * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1307 * This is meant to be called in the specific scenario where for locking reasons
1308 * we try to access user memory in atomic context (within a pagefault_disable()
1309 * section), this returns -EFAULT, and we want to resolve the user fault before
1312 * Typically this is meant to be used by the futex code.
1314 * The main difference with get_user_pages() is that this function will
1315 * unconditionally call handle_mm_fault() which will in turn perform all the
1316 * necessary SW fixup of the dirty and young bits in the PTE, while
1317 * get_user_pages() only guarantees to update these in the struct page.
1319 * This is important for some architectures where those bits also gate the
1320 * access permission to the page because they are maintained in software. On
1321 * such architectures, gup() will not be enough to make a subsequent access
1324 * This function will not return with an unlocked mmap_lock. So it has not the
1325 * same semantics wrt the @mm->mmap_lock as does filemap_fault().
1327 int fixup_user_fault(struct mm_struct
*mm
,
1328 unsigned long address
, unsigned int fault_flags
,
1331 struct vm_area_struct
*vma
;
1334 address
= untagged_addr(address
);
1337 fault_flags
|= FAULT_FLAG_ALLOW_RETRY
| FAULT_FLAG_KILLABLE
;
1340 vma
= find_extend_vma(mm
, address
);
1341 if (!vma
|| address
< vma
->vm_start
)
1344 if (!vma_permits_fault(vma
, fault_flags
))
1347 if ((fault_flags
& FAULT_FLAG_KILLABLE
) &&
1348 fatal_signal_pending(current
))
1351 ret
= handle_mm_fault(vma
, address
, fault_flags
, NULL
);
1353 if (ret
& VM_FAULT_COMPLETED
) {
1355 * NOTE: it's a pity that we need to retake the lock here
1356 * to pair with the unlock() in the callers. Ideally we
1357 * could tell the callers so they do not need to unlock.
1364 if (ret
& VM_FAULT_ERROR
) {
1365 int err
= vm_fault_to_errno(ret
, 0);
1372 if (ret
& VM_FAULT_RETRY
) {
1375 fault_flags
|= FAULT_FLAG_TRIED
;
1381 EXPORT_SYMBOL_GPL(fixup_user_fault
);
1384 * Please note that this function, unlike __get_user_pages will not
1385 * return 0 for nr_pages > 0 without FOLL_NOWAIT
1387 static __always_inline
long __get_user_pages_locked(struct mm_struct
*mm
,
1388 unsigned long start
,
1389 unsigned long nr_pages
,
1390 struct page
**pages
,
1391 struct vm_area_struct
**vmas
,
1395 long ret
, pages_done
;
1399 /* if VM_FAULT_RETRY can be returned, vmas become invalid */
1401 /* check caller initialized locked */
1402 BUG_ON(*locked
!= 1);
1405 if (flags
& FOLL_PIN
)
1406 mm_set_has_pinned_flag(&mm
->flags
);
1409 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1410 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1411 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1412 * for FOLL_GET, not for the newer FOLL_PIN.
1414 * FOLL_PIN always expects pages to be non-null, but no need to assert
1415 * that here, as any failures will be obvious enough.
1417 if (pages
&& !(flags
& FOLL_PIN
))
1421 lock_dropped
= false;
1423 ret
= __get_user_pages(mm
, start
, nr_pages
, flags
, pages
,
1426 /* VM_FAULT_RETRY couldn't trigger, bypass */
1429 /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
1432 BUG_ON(ret
>= nr_pages
);
1443 * VM_FAULT_RETRY didn't trigger or it was a
1451 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1452 * For the prefault case (!pages) we only update counts.
1456 start
+= ret
<< PAGE_SHIFT
;
1457 lock_dropped
= true;
1461 * Repeat on the address that fired VM_FAULT_RETRY
1462 * with both FAULT_FLAG_ALLOW_RETRY and
1463 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
1464 * by fatal signals, so we need to check it before we
1465 * start trying again otherwise it can loop forever.
1468 if (fatal_signal_pending(current
)) {
1470 pages_done
= -EINTR
;
1474 ret
= mmap_read_lock_killable(mm
);
1483 ret
= __get_user_pages(mm
, start
, 1, flags
| FOLL_TRIED
,
1484 pages
, NULL
, locked
);
1486 /* Continue to retry until we succeeded */
1504 if (lock_dropped
&& *locked
) {
1506 * We must let the caller know we temporarily dropped the lock
1507 * and so the critical section protected by it was lost.
1509 mmap_read_unlock(mm
);
1516 * populate_vma_page_range() - populate a range of pages in the vma.
1518 * @start: start address
1520 * @locked: whether the mmap_lock is still held
1522 * This takes care of mlocking the pages too if VM_LOCKED is set.
1524 * Return either number of pages pinned in the vma, or a negative error
1527 * vma->vm_mm->mmap_lock must be held.
1529 * If @locked is NULL, it may be held for read or write and will
1532 * If @locked is non-NULL, it must held for read only and may be
1533 * released. If it's released, *@locked will be set to 0.
1535 long populate_vma_page_range(struct vm_area_struct
*vma
,
1536 unsigned long start
, unsigned long end
, int *locked
)
1538 struct mm_struct
*mm
= vma
->vm_mm
;
1539 unsigned long nr_pages
= (end
- start
) / PAGE_SIZE
;
1543 VM_BUG_ON(!PAGE_ALIGNED(start
));
1544 VM_BUG_ON(!PAGE_ALIGNED(end
));
1545 VM_BUG_ON_VMA(start
< vma
->vm_start
, vma
);
1546 VM_BUG_ON_VMA(end
> vma
->vm_end
, vma
);
1547 mmap_assert_locked(mm
);
1550 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
1551 * faultin_page() to break COW, so it has no work to do here.
1553 if (vma
->vm_flags
& VM_LOCKONFAULT
)
1556 gup_flags
= FOLL_TOUCH
;
1558 * We want to touch writable mappings with a write fault in order
1559 * to break COW, except for shared mappings because these don't COW
1560 * and we would not want to dirty them for nothing.
1562 if ((vma
->vm_flags
& (VM_WRITE
| VM_SHARED
)) == VM_WRITE
)
1563 gup_flags
|= FOLL_WRITE
;
1566 * We want mlock to succeed for regions that have any permissions
1567 * other than PROT_NONE.
1569 if (vma_is_accessible(vma
))
1570 gup_flags
|= FOLL_FORCE
;
1573 * We made sure addr is within a VMA, so the following will
1574 * not result in a stack expansion that recurses back here.
1576 ret
= __get_user_pages(mm
, start
, nr_pages
, gup_flags
,
1577 NULL
, NULL
, locked
);
1583 * faultin_vma_page_range() - populate (prefault) page tables inside the
1584 * given VMA range readable/writable
1586 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1589 * @start: start address
1591 * @write: whether to prefault readable or writable
1592 * @locked: whether the mmap_lock is still held
1594 * Returns either number of processed pages in the vma, or a negative error
1595 * code on error (see __get_user_pages()).
1597 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
1598 * covered by the VMA.
1600 * If @locked is NULL, it may be held for read or write and will be unperturbed.
1602 * If @locked is non-NULL, it must held for read only and may be released. If
1603 * it's released, *@locked will be set to 0.
1605 long faultin_vma_page_range(struct vm_area_struct
*vma
, unsigned long start
,
1606 unsigned long end
, bool write
, int *locked
)
1608 struct mm_struct
*mm
= vma
->vm_mm
;
1609 unsigned long nr_pages
= (end
- start
) / PAGE_SIZE
;
1613 VM_BUG_ON(!PAGE_ALIGNED(start
));
1614 VM_BUG_ON(!PAGE_ALIGNED(end
));
1615 VM_BUG_ON_VMA(start
< vma
->vm_start
, vma
);
1616 VM_BUG_ON_VMA(end
> vma
->vm_end
, vma
);
1617 mmap_assert_locked(mm
);
1620 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1621 * the page dirty with FOLL_WRITE -- which doesn't make a
1622 * difference with !FOLL_FORCE, because the page is writable
1623 * in the page table.
1624 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1626 * !FOLL_FORCE: Require proper access permissions.
1628 gup_flags
= FOLL_TOUCH
| FOLL_HWPOISON
;
1630 gup_flags
|= FOLL_WRITE
;
1633 * We want to report -EINVAL instead of -EFAULT for any permission
1634 * problems or incompatible mappings.
1636 if (check_vma_flags(vma
, gup_flags
))
1639 ret
= __get_user_pages(mm
, start
, nr_pages
, gup_flags
,
1640 NULL
, NULL
, locked
);
1646 * __mm_populate - populate and/or mlock pages within a range of address space.
1648 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1649 * flags. VMAs must be already marked with the desired vm_flags, and
1650 * mmap_lock must not be held.
1652 int __mm_populate(unsigned long start
, unsigned long len
, int ignore_errors
)
1654 struct mm_struct
*mm
= current
->mm
;
1655 unsigned long end
, nstart
, nend
;
1656 struct vm_area_struct
*vma
= NULL
;
1662 for (nstart
= start
; nstart
< end
; nstart
= nend
) {
1664 * We want to fault in pages for [nstart; end) address range.
1665 * Find first corresponding VMA.
1670 vma
= find_vma(mm
, nstart
);
1671 } else if (nstart
>= vma
->vm_end
)
1673 if (!vma
|| vma
->vm_start
>= end
)
1676 * Set [nstart; nend) to intersection of desired address
1677 * range with the first VMA. Also, skip undesirable VMA types.
1679 nend
= min(end
, vma
->vm_end
);
1680 if (vma
->vm_flags
& (VM_IO
| VM_PFNMAP
))
1682 if (nstart
< vma
->vm_start
)
1683 nstart
= vma
->vm_start
;
1685 * Now fault in a range of pages. populate_vma_page_range()
1686 * double checks the vma flags, so that it won't mlock pages
1687 * if the vma was already munlocked.
1689 ret
= populate_vma_page_range(vma
, nstart
, nend
, &locked
);
1691 if (ignore_errors
) {
1693 continue; /* continue at next VMA */
1697 nend
= nstart
+ ret
* PAGE_SIZE
;
1701 mmap_read_unlock(mm
);
1702 return ret
; /* 0 or negative error code */
1704 #else /* CONFIG_MMU */
1705 static long __get_user_pages_locked(struct mm_struct
*mm
, unsigned long start
,
1706 unsigned long nr_pages
, struct page
**pages
,
1707 struct vm_area_struct
**vmas
, int *locked
,
1708 unsigned int foll_flags
)
1710 struct vm_area_struct
*vma
;
1711 unsigned long vm_flags
;
1714 /* calculate required read or write permissions.
1715 * If FOLL_FORCE is set, we only require the "MAY" flags.
1717 vm_flags
= (foll_flags
& FOLL_WRITE
) ?
1718 (VM_WRITE
| VM_MAYWRITE
) : (VM_READ
| VM_MAYREAD
);
1719 vm_flags
&= (foll_flags
& FOLL_FORCE
) ?
1720 (VM_MAYREAD
| VM_MAYWRITE
) : (VM_READ
| VM_WRITE
);
1722 for (i
= 0; i
< nr_pages
; i
++) {
1723 vma
= find_vma(mm
, start
);
1725 goto finish_or_fault
;
1727 /* protect what we can, including chardevs */
1728 if ((vma
->vm_flags
& (VM_IO
| VM_PFNMAP
)) ||
1729 !(vm_flags
& vma
->vm_flags
))
1730 goto finish_or_fault
;
1733 pages
[i
] = virt_to_page((void *)start
);
1739 start
= (start
+ PAGE_SIZE
) & PAGE_MASK
;
1745 return i
? : -EFAULT
;
1747 #endif /* !CONFIG_MMU */
1750 * fault_in_writeable - fault in userspace address range for writing
1751 * @uaddr: start of address range
1752 * @size: size of address range
1754 * Returns the number of bytes not faulted in (like copy_to_user() and
1755 * copy_from_user()).
1757 size_t fault_in_writeable(char __user
*uaddr
, size_t size
)
1759 char __user
*start
= uaddr
, *end
;
1761 if (unlikely(size
== 0))
1763 if (!user_write_access_begin(uaddr
, size
))
1765 if (!PAGE_ALIGNED(uaddr
)) {
1766 unsafe_put_user(0, uaddr
, out
);
1767 uaddr
= (char __user
*)PAGE_ALIGN((unsigned long)uaddr
);
1769 end
= (char __user
*)PAGE_ALIGN((unsigned long)start
+ size
);
1770 if (unlikely(end
< start
))
1772 while (uaddr
!= end
) {
1773 unsafe_put_user(0, uaddr
, out
);
1778 user_write_access_end();
1779 if (size
> uaddr
- start
)
1780 return size
- (uaddr
- start
);
1783 EXPORT_SYMBOL(fault_in_writeable
);
1786 * fault_in_subpage_writeable - fault in an address range for writing
1787 * @uaddr: start of address range
1788 * @size: size of address range
1790 * Fault in a user address range for writing while checking for permissions at
1791 * sub-page granularity (e.g. arm64 MTE). This function should be used when
1792 * the caller cannot guarantee forward progress of a copy_to_user() loop.
1794 * Returns the number of bytes not faulted in (like copy_to_user() and
1795 * copy_from_user()).
1797 size_t fault_in_subpage_writeable(char __user
*uaddr
, size_t size
)
1802 * Attempt faulting in at page granularity first for page table
1803 * permission checking. The arch-specific probe_subpage_writeable()
1804 * functions may not check for this.
1806 faulted_in
= size
- fault_in_writeable(uaddr
, size
);
1808 faulted_in
-= probe_subpage_writeable(uaddr
, faulted_in
);
1810 return size
- faulted_in
;
1812 EXPORT_SYMBOL(fault_in_subpage_writeable
);
1815 * fault_in_safe_writeable - fault in an address range for writing
1816 * @uaddr: start of address range
1817 * @size: length of address range
1819 * Faults in an address range for writing. This is primarily useful when we
1820 * already know that some or all of the pages in the address range aren't in
1823 * Unlike fault_in_writeable(), this function is non-destructive.
1825 * Note that we don't pin or otherwise hold the pages referenced that we fault
1826 * in. There's no guarantee that they'll stay in memory for any duration of
1829 * Returns the number of bytes not faulted in, like copy_to_user() and
1832 size_t fault_in_safe_writeable(const char __user
*uaddr
, size_t size
)
1834 unsigned long start
= (unsigned long)uaddr
, end
;
1835 struct mm_struct
*mm
= current
->mm
;
1836 bool unlocked
= false;
1838 if (unlikely(size
== 0))
1840 end
= PAGE_ALIGN(start
+ size
);
1846 if (fixup_user_fault(mm
, start
, FAULT_FLAG_WRITE
, &unlocked
))
1848 start
= (start
+ PAGE_SIZE
) & PAGE_MASK
;
1849 } while (start
!= end
);
1850 mmap_read_unlock(mm
);
1852 if (size
> (unsigned long)uaddr
- start
)
1853 return size
- ((unsigned long)uaddr
- start
);
1856 EXPORT_SYMBOL(fault_in_safe_writeable
);
1859 * fault_in_readable - fault in userspace address range for reading
1860 * @uaddr: start of user address range
1861 * @size: size of user address range
1863 * Returns the number of bytes not faulted in (like copy_to_user() and
1864 * copy_from_user()).
1866 size_t fault_in_readable(const char __user
*uaddr
, size_t size
)
1868 const char __user
*start
= uaddr
, *end
;
1871 if (unlikely(size
== 0))
1873 if (!user_read_access_begin(uaddr
, size
))
1875 if (!PAGE_ALIGNED(uaddr
)) {
1876 unsafe_get_user(c
, uaddr
, out
);
1877 uaddr
= (const char __user
*)PAGE_ALIGN((unsigned long)uaddr
);
1879 end
= (const char __user
*)PAGE_ALIGN((unsigned long)start
+ size
);
1880 if (unlikely(end
< start
))
1882 while (uaddr
!= end
) {
1883 unsafe_get_user(c
, uaddr
, out
);
1888 user_read_access_end();
1890 if (size
> uaddr
- start
)
1891 return size
- (uaddr
- start
);
1894 EXPORT_SYMBOL(fault_in_readable
);
1897 * get_dump_page() - pin user page in memory while writing it to core dump
1898 * @addr: user address
1900 * Returns struct page pointer of user page pinned for dump,
1901 * to be freed afterwards by put_page().
1903 * Returns NULL on any kind of failure - a hole must then be inserted into
1904 * the corefile, to preserve alignment with its headers; and also returns
1905 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1906 * allowing a hole to be left in the corefile to save disk space.
1908 * Called without mmap_lock (takes and releases the mmap_lock by itself).
1910 #ifdef CONFIG_ELF_CORE
1911 struct page
*get_dump_page(unsigned long addr
)
1913 struct mm_struct
*mm
= current
->mm
;
1918 if (mmap_read_lock_killable(mm
))
1920 ret
= __get_user_pages_locked(mm
, addr
, 1, &page
, NULL
, &locked
,
1921 FOLL_FORCE
| FOLL_DUMP
| FOLL_GET
);
1923 mmap_read_unlock(mm
);
1924 return (ret
== 1) ? page
: NULL
;
1926 #endif /* CONFIG_ELF_CORE */
1928 #ifdef CONFIG_MIGRATION
1930 * Check whether all pages are pinnable, if so return number of pages. If some
1931 * pages are not pinnable, migrate them, and unpin all pages. Return zero if
1932 * pages were migrated, or if some pages were not successfully isolated.
1933 * Return negative error if migration fails.
1935 static long check_and_migrate_movable_pages(unsigned long nr_pages
,
1936 struct page
**pages
,
1937 unsigned int gup_flags
)
1939 unsigned long isolation_error_count
= 0, i
;
1940 struct folio
*prev_folio
= NULL
;
1941 LIST_HEAD(movable_page_list
);
1942 bool drain_allow
= true, coherent_pages
= false;
1945 for (i
= 0; i
< nr_pages
; i
++) {
1946 struct folio
*folio
= page_folio(pages
[i
]);
1948 if (folio
== prev_folio
)
1953 * Device coherent pages are managed by a driver and should not
1954 * be pinned indefinitely as it prevents the driver moving the
1955 * page. So when trying to pin with FOLL_LONGTERM instead try
1956 * to migrate the page out of device memory.
1958 if (folio_is_device_coherent(folio
)) {
1960 * We always want a new GUP lookup with device coherent
1964 coherent_pages
= true;
1967 * Migration will fail if the page is pinned, so convert
1968 * the pin on the source page to a normal reference.
1970 if (gup_flags
& FOLL_PIN
) {
1971 get_page(&folio
->page
);
1972 unpin_user_page(&folio
->page
);
1975 ret
= migrate_device_coherent_page(&folio
->page
);
1982 if (folio_is_longterm_pinnable(folio
))
1985 * Try to move out any movable page before pinning the range.
1987 if (folio_test_hugetlb(folio
)) {
1988 if (isolate_hugetlb(&folio
->page
,
1989 &movable_page_list
))
1990 isolation_error_count
++;
1994 if (!folio_test_lru(folio
) && drain_allow
) {
1995 lru_add_drain_all();
1996 drain_allow
= false;
1999 if (folio_isolate_lru(folio
)) {
2000 isolation_error_count
++;
2003 list_add_tail(&folio
->lru
, &movable_page_list
);
2004 node_stat_mod_folio(folio
,
2005 NR_ISOLATED_ANON
+ folio_is_file_lru(folio
),
2006 folio_nr_pages(folio
));
2009 if (!list_empty(&movable_page_list
) || isolation_error_count
||
2014 * If list is empty, and no isolation errors, means that all pages are
2015 * in the correct zone.
2021 * pages[i] might be NULL if any device coherent pages were found.
2023 for (i
= 0; i
< nr_pages
; i
++) {
2027 if (gup_flags
& FOLL_PIN
)
2028 unpin_user_page(pages
[i
]);
2033 if (!list_empty(&movable_page_list
)) {
2034 struct migration_target_control mtc
= {
2035 .nid
= NUMA_NO_NODE
,
2036 .gfp_mask
= GFP_USER
| __GFP_NOWARN
,
2039 ret
= migrate_pages(&movable_page_list
, alloc_migration_target
,
2040 NULL
, (unsigned long)&mtc
, MIGRATE_SYNC
,
2041 MR_LONGTERM_PIN
, NULL
);
2042 if (ret
> 0) /* number of pages not migrated */
2046 if (ret
&& !list_empty(&movable_page_list
))
2047 putback_movable_pages(&movable_page_list
);
2051 static long check_and_migrate_movable_pages(unsigned long nr_pages
,
2052 struct page
**pages
,
2053 unsigned int gup_flags
)
2057 #endif /* CONFIG_MIGRATION */
2060 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
2061 * allows us to process the FOLL_LONGTERM flag.
2063 static long __gup_longterm_locked(struct mm_struct
*mm
,
2064 unsigned long start
,
2065 unsigned long nr_pages
,
2066 struct page
**pages
,
2067 struct vm_area_struct
**vmas
,
2068 unsigned int gup_flags
)
2073 if (!(gup_flags
& FOLL_LONGTERM
))
2074 return __get_user_pages_locked(mm
, start
, nr_pages
, pages
, vmas
,
2076 flags
= memalloc_pin_save();
2078 rc
= __get_user_pages_locked(mm
, start
, nr_pages
, pages
, vmas
,
2082 rc
= check_and_migrate_movable_pages(rc
, pages
, gup_flags
);
2084 memalloc_pin_restore(flags
);
2089 static bool is_valid_gup_flags(unsigned int gup_flags
)
2092 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2093 * never directly by the caller, so enforce that with an assertion:
2095 if (WARN_ON_ONCE(gup_flags
& FOLL_PIN
))
2098 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
2099 * that is, FOLL_LONGTERM is a specific case, more restrictive case of
2102 if (WARN_ON_ONCE(gup_flags
& FOLL_LONGTERM
))
2109 static long __get_user_pages_remote(struct mm_struct
*mm
,
2110 unsigned long start
, unsigned long nr_pages
,
2111 unsigned int gup_flags
, struct page
**pages
,
2112 struct vm_area_struct
**vmas
, int *locked
)
2115 * Parts of FOLL_LONGTERM behavior are incompatible with
2116 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2117 * vmas. However, this only comes up if locked is set, and there are
2118 * callers that do request FOLL_LONGTERM, but do not set locked. So,
2119 * allow what we can.
2121 if (gup_flags
& FOLL_LONGTERM
) {
2122 if (WARN_ON_ONCE(locked
))
2125 * This will check the vmas (even if our vmas arg is NULL)
2126 * and return -ENOTSUPP if DAX isn't allowed in this case:
2128 return __gup_longterm_locked(mm
, start
, nr_pages
, pages
,
2129 vmas
, gup_flags
| FOLL_TOUCH
|
2133 return __get_user_pages_locked(mm
, start
, nr_pages
, pages
, vmas
,
2135 gup_flags
| FOLL_TOUCH
| FOLL_REMOTE
);
2139 * get_user_pages_remote() - pin user pages in memory
2140 * @mm: mm_struct of target mm
2141 * @start: starting user address
2142 * @nr_pages: number of pages from start to pin
2143 * @gup_flags: flags modifying lookup behaviour
2144 * @pages: array that receives pointers to the pages pinned.
2145 * Should be at least nr_pages long. Or NULL, if caller
2146 * only intends to ensure the pages are faulted in.
2147 * @vmas: array of pointers to vmas corresponding to each page.
2148 * Or NULL if the caller does not require them.
2149 * @locked: pointer to lock flag indicating whether lock is held and
2150 * subsequently whether VM_FAULT_RETRY functionality can be
2151 * utilised. Lock must initially be held.
2153 * Returns either number of pages pinned (which may be less than the
2154 * number requested), or an error. Details about the return value:
2156 * -- If nr_pages is 0, returns 0.
2157 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2158 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2159 * pages pinned. Again, this may be less than nr_pages.
2161 * The caller is responsible for releasing returned @pages, via put_page().
2163 * @vmas are valid only as long as mmap_lock is held.
2165 * Must be called with mmap_lock held for read or write.
2167 * get_user_pages_remote walks a process's page tables and takes a reference
2168 * to each struct page that each user address corresponds to at a given
2169 * instant. That is, it takes the page that would be accessed if a user
2170 * thread accesses the given user virtual address at that instant.
2172 * This does not guarantee that the page exists in the user mappings when
2173 * get_user_pages_remote returns, and there may even be a completely different
2174 * page there in some cases (eg. if mmapped pagecache has been invalidated
2175 * and subsequently re faulted). However it does guarantee that the page
2176 * won't be freed completely. And mostly callers simply care that the page
2177 * contains data that was valid *at some point in time*. Typically, an IO
2178 * or similar operation cannot guarantee anything stronger anyway because
2179 * locks can't be held over the syscall boundary.
2181 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2182 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2183 * be called after the page is finished with, and before put_page is called.
2185 * get_user_pages_remote is typically used for fewer-copy IO operations,
2186 * to get a handle on the memory by some means other than accesses
2187 * via the user virtual addresses. The pages may be submitted for
2188 * DMA to devices or accessed via their kernel linear mapping (via the
2189 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
2191 * See also get_user_pages_fast, for performance critical applications.
2193 * get_user_pages_remote should be phased out in favor of
2194 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
2195 * should use get_user_pages_remote because it cannot pass
2196 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2198 long get_user_pages_remote(struct mm_struct
*mm
,
2199 unsigned long start
, unsigned long nr_pages
,
2200 unsigned int gup_flags
, struct page
**pages
,
2201 struct vm_area_struct
**vmas
, int *locked
)
2203 if (!is_valid_gup_flags(gup_flags
))
2206 return __get_user_pages_remote(mm
, start
, nr_pages
, gup_flags
,
2207 pages
, vmas
, locked
);
2209 EXPORT_SYMBOL(get_user_pages_remote
);
2211 #else /* CONFIG_MMU */
2212 long get_user_pages_remote(struct mm_struct
*mm
,
2213 unsigned long start
, unsigned long nr_pages
,
2214 unsigned int gup_flags
, struct page
**pages
,
2215 struct vm_area_struct
**vmas
, int *locked
)
2220 static long __get_user_pages_remote(struct mm_struct
*mm
,
2221 unsigned long start
, unsigned long nr_pages
,
2222 unsigned int gup_flags
, struct page
**pages
,
2223 struct vm_area_struct
**vmas
, int *locked
)
2227 #endif /* !CONFIG_MMU */
2230 * get_user_pages() - pin user pages in memory
2231 * @start: starting user address
2232 * @nr_pages: number of pages from start to pin
2233 * @gup_flags: flags modifying lookup behaviour
2234 * @pages: array that receives pointers to the pages pinned.
2235 * Should be at least nr_pages long. Or NULL, if caller
2236 * only intends to ensure the pages are faulted in.
2237 * @vmas: array of pointers to vmas corresponding to each page.
2238 * Or NULL if the caller does not require them.
2240 * This is the same as get_user_pages_remote(), just with a less-flexible
2241 * calling convention where we assume that the mm being operated on belongs to
2242 * the current task, and doesn't allow passing of a locked parameter. We also
2243 * obviously don't pass FOLL_REMOTE in here.
2245 long get_user_pages(unsigned long start
, unsigned long nr_pages
,
2246 unsigned int gup_flags
, struct page
**pages
,
2247 struct vm_area_struct
**vmas
)
2249 if (!is_valid_gup_flags(gup_flags
))
2252 return __gup_longterm_locked(current
->mm
, start
, nr_pages
,
2253 pages
, vmas
, gup_flags
| FOLL_TOUCH
);
2255 EXPORT_SYMBOL(get_user_pages
);
2258 * get_user_pages_unlocked() is suitable to replace the form:
2260 * mmap_read_lock(mm);
2261 * get_user_pages(mm, ..., pages, NULL);
2262 * mmap_read_unlock(mm);
2266 * get_user_pages_unlocked(mm, ..., pages);
2268 * It is functionally equivalent to get_user_pages_fast so
2269 * get_user_pages_fast should be used instead if specific gup_flags
2270 * (e.g. FOLL_FORCE) are not required.
2272 long get_user_pages_unlocked(unsigned long start
, unsigned long nr_pages
,
2273 struct page
**pages
, unsigned int gup_flags
)
2275 struct mm_struct
*mm
= current
->mm
;
2280 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2281 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2282 * vmas. As there are no users of this flag in this call we simply
2283 * disallow this option for now.
2285 if (WARN_ON_ONCE(gup_flags
& FOLL_LONGTERM
))
2289 ret
= __get_user_pages_locked(mm
, start
, nr_pages
, pages
, NULL
,
2290 &locked
, gup_flags
| FOLL_TOUCH
);
2292 mmap_read_unlock(mm
);
2295 EXPORT_SYMBOL(get_user_pages_unlocked
);
2300 * get_user_pages_fast attempts to pin user pages by walking the page
2301 * tables directly and avoids taking locks. Thus the walker needs to be
2302 * protected from page table pages being freed from under it, and should
2303 * block any THP splits.
2305 * One way to achieve this is to have the walker disable interrupts, and
2306 * rely on IPIs from the TLB flushing code blocking before the page table
2307 * pages are freed. This is unsuitable for architectures that do not need
2308 * to broadcast an IPI when invalidating TLBs.
2310 * Another way to achieve this is to batch up page table containing pages
2311 * belonging to more than one mm_user, then rcu_sched a callback to free those
2312 * pages. Disabling interrupts will allow the fast_gup walker to both block
2313 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2314 * (which is a relatively rare event). The code below adopts this strategy.
2316 * Before activating this code, please be aware that the following assumptions
2317 * are currently made:
2319 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2320 * free pages containing page tables or TLB flushing requires IPI broadcast.
2322 * *) ptes can be read atomically by the architecture.
2324 * *) access_ok is sufficient to validate userspace address ranges.
2326 * The last two assumptions can be relaxed by the addition of helper functions.
2328 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2330 #ifdef CONFIG_HAVE_FAST_GUP
2332 static void __maybe_unused
undo_dev_pagemap(int *nr
, int nr_start
,
2334 struct page
**pages
)
2336 while ((*nr
) - nr_start
) {
2337 struct page
*page
= pages
[--(*nr
)];
2339 ClearPageReferenced(page
);
2340 if (flags
& FOLL_PIN
)
2341 unpin_user_page(page
);
2347 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2348 static int gup_pte_range(pmd_t pmd
, unsigned long addr
, unsigned long end
,
2349 unsigned int flags
, struct page
**pages
, int *nr
)
2351 struct dev_pagemap
*pgmap
= NULL
;
2352 int nr_start
= *nr
, ret
= 0;
2355 ptem
= ptep
= pte_offset_map(&pmd
, addr
);
2357 pte_t pte
= ptep_get_lockless(ptep
);
2359 struct folio
*folio
;
2362 * Similar to the PMD case below, NUMA hinting must take slow
2363 * path using the pte_protnone check.
2365 if (pte_protnone(pte
))
2368 if (!pte_access_permitted(pte
, flags
& FOLL_WRITE
))
2371 if (pte_devmap(pte
)) {
2372 if (unlikely(flags
& FOLL_LONGTERM
))
2375 pgmap
= get_dev_pagemap(pte_pfn(pte
), pgmap
);
2376 if (unlikely(!pgmap
)) {
2377 undo_dev_pagemap(nr
, nr_start
, flags
, pages
);
2380 } else if (pte_special(pte
))
2383 VM_BUG_ON(!pfn_valid(pte_pfn(pte
)));
2384 page
= pte_page(pte
);
2386 folio
= try_grab_folio(page
, 1, flags
);
2390 if (unlikely(page_is_secretmem(page
))) {
2391 gup_put_folio(folio
, 1, flags
);
2395 if (unlikely(pte_val(pte
) != pte_val(*ptep
))) {
2396 gup_put_folio(folio
, 1, flags
);
2400 if (!pte_write(pte
) && gup_must_unshare(flags
, page
)) {
2401 gup_put_folio(folio
, 1, flags
);
2406 * We need to make the page accessible if and only if we are
2407 * going to access its content (the FOLL_PIN case). Please
2408 * see Documentation/core-api/pin_user_pages.rst for
2411 if (flags
& FOLL_PIN
) {
2412 ret
= arch_make_page_accessible(page
);
2414 gup_put_folio(folio
, 1, flags
);
2418 folio_set_referenced(folio
);
2421 } while (ptep
++, addr
+= PAGE_SIZE
, addr
!= end
);
2427 put_dev_pagemap(pgmap
);
2434 * If we can't determine whether or not a pte is special, then fail immediately
2435 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2438 * For a futex to be placed on a THP tail page, get_futex_key requires a
2439 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2440 * useful to have gup_huge_pmd even if we can't operate on ptes.
2442 static int gup_pte_range(pmd_t pmd
, unsigned long addr
, unsigned long end
,
2443 unsigned int flags
, struct page
**pages
, int *nr
)
2447 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2449 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2450 static int __gup_device_huge(unsigned long pfn
, unsigned long addr
,
2451 unsigned long end
, unsigned int flags
,
2452 struct page
**pages
, int *nr
)
2455 struct dev_pagemap
*pgmap
= NULL
;
2458 struct page
*page
= pfn_to_page(pfn
);
2460 pgmap
= get_dev_pagemap(pfn
, pgmap
);
2461 if (unlikely(!pgmap
)) {
2462 undo_dev_pagemap(nr
, nr_start
, flags
, pages
);
2465 SetPageReferenced(page
);
2467 if (unlikely(!try_grab_page(page
, flags
))) {
2468 undo_dev_pagemap(nr
, nr_start
, flags
, pages
);
2473 } while (addr
+= PAGE_SIZE
, addr
!= end
);
2475 put_dev_pagemap(pgmap
);
2479 static int __gup_device_huge_pmd(pmd_t orig
, pmd_t
*pmdp
, unsigned long addr
,
2480 unsigned long end
, unsigned int flags
,
2481 struct page
**pages
, int *nr
)
2483 unsigned long fault_pfn
;
2486 fault_pfn
= pmd_pfn(orig
) + ((addr
& ~PMD_MASK
) >> PAGE_SHIFT
);
2487 if (!__gup_device_huge(fault_pfn
, addr
, end
, flags
, pages
, nr
))
2490 if (unlikely(pmd_val(orig
) != pmd_val(*pmdp
))) {
2491 undo_dev_pagemap(nr
, nr_start
, flags
, pages
);
2497 static int __gup_device_huge_pud(pud_t orig
, pud_t
*pudp
, unsigned long addr
,
2498 unsigned long end
, unsigned int flags
,
2499 struct page
**pages
, int *nr
)
2501 unsigned long fault_pfn
;
2504 fault_pfn
= pud_pfn(orig
) + ((addr
& ~PUD_MASK
) >> PAGE_SHIFT
);
2505 if (!__gup_device_huge(fault_pfn
, addr
, end
, flags
, pages
, nr
))
2508 if (unlikely(pud_val(orig
) != pud_val(*pudp
))) {
2509 undo_dev_pagemap(nr
, nr_start
, flags
, pages
);
2515 static int __gup_device_huge_pmd(pmd_t orig
, pmd_t
*pmdp
, unsigned long addr
,
2516 unsigned long end
, unsigned int flags
,
2517 struct page
**pages
, int *nr
)
2523 static int __gup_device_huge_pud(pud_t pud
, pud_t
*pudp
, unsigned long addr
,
2524 unsigned long end
, unsigned int flags
,
2525 struct page
**pages
, int *nr
)
2532 static int record_subpages(struct page
*page
, unsigned long addr
,
2533 unsigned long end
, struct page
**pages
)
2537 for (nr
= 0; addr
!= end
; nr
++, addr
+= PAGE_SIZE
)
2538 pages
[nr
] = nth_page(page
, nr
);
2543 #ifdef CONFIG_ARCH_HAS_HUGEPD
2544 static unsigned long hugepte_addr_end(unsigned long addr
, unsigned long end
,
2547 unsigned long __boundary
= (addr
+ sz
) & ~(sz
-1);
2548 return (__boundary
- 1 < end
- 1) ? __boundary
: end
;
2551 static int gup_hugepte(pte_t
*ptep
, unsigned long sz
, unsigned long addr
,
2552 unsigned long end
, unsigned int flags
,
2553 struct page
**pages
, int *nr
)
2555 unsigned long pte_end
;
2557 struct folio
*folio
;
2561 pte_end
= (addr
+ sz
) & ~(sz
-1);
2565 pte
= huge_ptep_get(ptep
);
2567 if (!pte_access_permitted(pte
, flags
& FOLL_WRITE
))
2570 /* hugepages are never "special" */
2571 VM_BUG_ON(!pfn_valid(pte_pfn(pte
)));
2573 page
= nth_page(pte_page(pte
), (addr
& (sz
- 1)) >> PAGE_SHIFT
);
2574 refs
= record_subpages(page
, addr
, end
, pages
+ *nr
);
2576 folio
= try_grab_folio(page
, refs
, flags
);
2580 if (unlikely(pte_val(pte
) != pte_val(*ptep
))) {
2581 gup_put_folio(folio
, refs
, flags
);
2585 if (!pte_write(pte
) && gup_must_unshare(flags
, &folio
->page
)) {
2586 gup_put_folio(folio
, refs
, flags
);
2591 folio_set_referenced(folio
);
2595 static int gup_huge_pd(hugepd_t hugepd
, unsigned long addr
,
2596 unsigned int pdshift
, unsigned long end
, unsigned int flags
,
2597 struct page
**pages
, int *nr
)
2600 unsigned long sz
= 1UL << hugepd_shift(hugepd
);
2603 ptep
= hugepte_offset(hugepd
, addr
, pdshift
);
2605 next
= hugepte_addr_end(addr
, end
, sz
);
2606 if (!gup_hugepte(ptep
, sz
, addr
, end
, flags
, pages
, nr
))
2608 } while (ptep
++, addr
= next
, addr
!= end
);
2613 static inline int gup_huge_pd(hugepd_t hugepd
, unsigned long addr
,
2614 unsigned int pdshift
, unsigned long end
, unsigned int flags
,
2615 struct page
**pages
, int *nr
)
2619 #endif /* CONFIG_ARCH_HAS_HUGEPD */
2621 static int gup_huge_pmd(pmd_t orig
, pmd_t
*pmdp
, unsigned long addr
,
2622 unsigned long end
, unsigned int flags
,
2623 struct page
**pages
, int *nr
)
2626 struct folio
*folio
;
2629 if (!pmd_access_permitted(orig
, flags
& FOLL_WRITE
))
2632 if (pmd_devmap(orig
)) {
2633 if (unlikely(flags
& FOLL_LONGTERM
))
2635 return __gup_device_huge_pmd(orig
, pmdp
, addr
, end
, flags
,
2639 page
= nth_page(pmd_page(orig
), (addr
& ~PMD_MASK
) >> PAGE_SHIFT
);
2640 refs
= record_subpages(page
, addr
, end
, pages
+ *nr
);
2642 folio
= try_grab_folio(page
, refs
, flags
);
2646 if (unlikely(pmd_val(orig
) != pmd_val(*pmdp
))) {
2647 gup_put_folio(folio
, refs
, flags
);
2651 if (!pmd_write(orig
) && gup_must_unshare(flags
, &folio
->page
)) {
2652 gup_put_folio(folio
, refs
, flags
);
2657 folio_set_referenced(folio
);
2661 static int gup_huge_pud(pud_t orig
, pud_t
*pudp
, unsigned long addr
,
2662 unsigned long end
, unsigned int flags
,
2663 struct page
**pages
, int *nr
)
2666 struct folio
*folio
;
2669 if (!pud_access_permitted(orig
, flags
& FOLL_WRITE
))
2672 if (pud_devmap(orig
)) {
2673 if (unlikely(flags
& FOLL_LONGTERM
))
2675 return __gup_device_huge_pud(orig
, pudp
, addr
, end
, flags
,
2679 page
= nth_page(pud_page(orig
), (addr
& ~PUD_MASK
) >> PAGE_SHIFT
);
2680 refs
= record_subpages(page
, addr
, end
, pages
+ *nr
);
2682 folio
= try_grab_folio(page
, refs
, flags
);
2686 if (unlikely(pud_val(orig
) != pud_val(*pudp
))) {
2687 gup_put_folio(folio
, refs
, flags
);
2691 if (!pud_write(orig
) && gup_must_unshare(flags
, &folio
->page
)) {
2692 gup_put_folio(folio
, refs
, flags
);
2697 folio_set_referenced(folio
);
2701 static int gup_huge_pgd(pgd_t orig
, pgd_t
*pgdp
, unsigned long addr
,
2702 unsigned long end
, unsigned int flags
,
2703 struct page
**pages
, int *nr
)
2707 struct folio
*folio
;
2709 if (!pgd_access_permitted(orig
, flags
& FOLL_WRITE
))
2712 BUILD_BUG_ON(pgd_devmap(orig
));
2714 page
= nth_page(pgd_page(orig
), (addr
& ~PGDIR_MASK
) >> PAGE_SHIFT
);
2715 refs
= record_subpages(page
, addr
, end
, pages
+ *nr
);
2717 folio
= try_grab_folio(page
, refs
, flags
);
2721 if (unlikely(pgd_val(orig
) != pgd_val(*pgdp
))) {
2722 gup_put_folio(folio
, refs
, flags
);
2727 folio_set_referenced(folio
);
2731 static int gup_pmd_range(pud_t
*pudp
, pud_t pud
, unsigned long addr
, unsigned long end
,
2732 unsigned int flags
, struct page
**pages
, int *nr
)
2737 pmdp
= pmd_offset_lockless(pudp
, pud
, addr
);
2739 pmd_t pmd
= READ_ONCE(*pmdp
);
2741 next
= pmd_addr_end(addr
, end
);
2742 if (!pmd_present(pmd
))
2745 if (unlikely(pmd_trans_huge(pmd
) || pmd_huge(pmd
) ||
2748 * NUMA hinting faults need to be handled in the GUP
2749 * slowpath for accounting purposes and so that they
2750 * can be serialised against THP migration.
2752 if (pmd_protnone(pmd
))
2755 if (!gup_huge_pmd(pmd
, pmdp
, addr
, next
, flags
,
2759 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd
))))) {
2761 * architecture have different format for hugetlbfs
2762 * pmd format and THP pmd format
2764 if (!gup_huge_pd(__hugepd(pmd_val(pmd
)), addr
,
2765 PMD_SHIFT
, next
, flags
, pages
, nr
))
2767 } else if (!gup_pte_range(pmd
, addr
, next
, flags
, pages
, nr
))
2769 } while (pmdp
++, addr
= next
, addr
!= end
);
2774 static int gup_pud_range(p4d_t
*p4dp
, p4d_t p4d
, unsigned long addr
, unsigned long end
,
2775 unsigned int flags
, struct page
**pages
, int *nr
)
2780 pudp
= pud_offset_lockless(p4dp
, p4d
, addr
);
2782 pud_t pud
= READ_ONCE(*pudp
);
2784 next
= pud_addr_end(addr
, end
);
2785 if (unlikely(!pud_present(pud
)))
2787 if (unlikely(pud_huge(pud
))) {
2788 if (!gup_huge_pud(pud
, pudp
, addr
, next
, flags
,
2791 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud
))))) {
2792 if (!gup_huge_pd(__hugepd(pud_val(pud
)), addr
,
2793 PUD_SHIFT
, next
, flags
, pages
, nr
))
2795 } else if (!gup_pmd_range(pudp
, pud
, addr
, next
, flags
, pages
, nr
))
2797 } while (pudp
++, addr
= next
, addr
!= end
);
2802 static int gup_p4d_range(pgd_t
*pgdp
, pgd_t pgd
, unsigned long addr
, unsigned long end
,
2803 unsigned int flags
, struct page
**pages
, int *nr
)
2808 p4dp
= p4d_offset_lockless(pgdp
, pgd
, addr
);
2810 p4d_t p4d
= READ_ONCE(*p4dp
);
2812 next
= p4d_addr_end(addr
, end
);
2815 BUILD_BUG_ON(p4d_huge(p4d
));
2816 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d
))))) {
2817 if (!gup_huge_pd(__hugepd(p4d_val(p4d
)), addr
,
2818 P4D_SHIFT
, next
, flags
, pages
, nr
))
2820 } else if (!gup_pud_range(p4dp
, p4d
, addr
, next
, flags
, pages
, nr
))
2822 } while (p4dp
++, addr
= next
, addr
!= end
);
2827 static void gup_pgd_range(unsigned long addr
, unsigned long end
,
2828 unsigned int flags
, struct page
**pages
, int *nr
)
2833 pgdp
= pgd_offset(current
->mm
, addr
);
2835 pgd_t pgd
= READ_ONCE(*pgdp
);
2837 next
= pgd_addr_end(addr
, end
);
2840 if (unlikely(pgd_huge(pgd
))) {
2841 if (!gup_huge_pgd(pgd
, pgdp
, addr
, next
, flags
,
2844 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd
))))) {
2845 if (!gup_huge_pd(__hugepd(pgd_val(pgd
)), addr
,
2846 PGDIR_SHIFT
, next
, flags
, pages
, nr
))
2848 } else if (!gup_p4d_range(pgdp
, pgd
, addr
, next
, flags
, pages
, nr
))
2850 } while (pgdp
++, addr
= next
, addr
!= end
);
2853 static inline void gup_pgd_range(unsigned long addr
, unsigned long end
,
2854 unsigned int flags
, struct page
**pages
, int *nr
)
2857 #endif /* CONFIG_HAVE_FAST_GUP */
2859 #ifndef gup_fast_permitted
2861 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2862 * we need to fall back to the slow version:
2864 static bool gup_fast_permitted(unsigned long start
, unsigned long end
)
2870 static int __gup_longterm_unlocked(unsigned long start
, int nr_pages
,
2871 unsigned int gup_flags
, struct page
**pages
)
2876 * FIXME: FOLL_LONGTERM does not work with
2877 * get_user_pages_unlocked() (see comments in that function)
2879 if (gup_flags
& FOLL_LONGTERM
) {
2880 mmap_read_lock(current
->mm
);
2881 ret
= __gup_longterm_locked(current
->mm
,
2883 pages
, NULL
, gup_flags
);
2884 mmap_read_unlock(current
->mm
);
2886 ret
= get_user_pages_unlocked(start
, nr_pages
,
2893 static unsigned long lockless_pages_from_mm(unsigned long start
,
2895 unsigned int gup_flags
,
2896 struct page
**pages
)
2898 unsigned long flags
;
2902 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP
) ||
2903 !gup_fast_permitted(start
, end
))
2906 if (gup_flags
& FOLL_PIN
) {
2907 seq
= raw_read_seqcount(¤t
->mm
->write_protect_seq
);
2913 * Disable interrupts. The nested form is used, in order to allow full,
2914 * general purpose use of this routine.
2916 * With interrupts disabled, we block page table pages from being freed
2917 * from under us. See struct mmu_table_batch comments in
2918 * include/asm-generic/tlb.h for more details.
2920 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2921 * that come from THPs splitting.
2923 local_irq_save(flags
);
2924 gup_pgd_range(start
, end
, gup_flags
, pages
, &nr_pinned
);
2925 local_irq_restore(flags
);
2928 * When pinning pages for DMA there could be a concurrent write protect
2929 * from fork() via copy_page_range(), in this case always fail fast GUP.
2931 if (gup_flags
& FOLL_PIN
) {
2932 if (read_seqcount_retry(¤t
->mm
->write_protect_seq
, seq
)) {
2933 unpin_user_pages_lockless(pages
, nr_pinned
);
2936 sanity_check_pinned_pages(pages
, nr_pinned
);
2942 static int internal_get_user_pages_fast(unsigned long start
,
2943 unsigned long nr_pages
,
2944 unsigned int gup_flags
,
2945 struct page
**pages
)
2947 unsigned long len
, end
;
2948 unsigned long nr_pinned
;
2951 if (WARN_ON_ONCE(gup_flags
& ~(FOLL_WRITE
| FOLL_LONGTERM
|
2952 FOLL_FORCE
| FOLL_PIN
| FOLL_GET
|
2953 FOLL_FAST_ONLY
| FOLL_NOFAULT
)))
2956 if (gup_flags
& FOLL_PIN
)
2957 mm_set_has_pinned_flag(¤t
->mm
->flags
);
2959 if (!(gup_flags
& FOLL_FAST_ONLY
))
2960 might_lock_read(¤t
->mm
->mmap_lock
);
2962 start
= untagged_addr(start
) & PAGE_MASK
;
2963 len
= nr_pages
<< PAGE_SHIFT
;
2964 if (check_add_overflow(start
, len
, &end
))
2966 if (unlikely(!access_ok((void __user
*)start
, len
)))
2969 nr_pinned
= lockless_pages_from_mm(start
, end
, gup_flags
, pages
);
2970 if (nr_pinned
== nr_pages
|| gup_flags
& FOLL_FAST_ONLY
)
2973 /* Slow path: try to get the remaining pages with get_user_pages */
2974 start
+= nr_pinned
<< PAGE_SHIFT
;
2976 ret
= __gup_longterm_unlocked(start
, nr_pages
- nr_pinned
, gup_flags
,
2980 * The caller has to unpin the pages we already pinned so
2981 * returning -errno is not an option
2987 return ret
+ nr_pinned
;
2991 * get_user_pages_fast_only() - pin user pages in memory
2992 * @start: starting user address
2993 * @nr_pages: number of pages from start to pin
2994 * @gup_flags: flags modifying pin behaviour
2995 * @pages: array that receives pointers to the pages pinned.
2996 * Should be at least nr_pages long.
2998 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
3000 * Note a difference with get_user_pages_fast: this always returns the
3001 * number of pages pinned, 0 if no pages were pinned.
3003 * If the architecture does not support this function, simply return with no
3006 * Careful, careful! COW breaking can go either way, so a non-write
3007 * access can get ambiguous page results. If you call this function without
3008 * 'write' set, you'd better be sure that you're ok with that ambiguity.
3010 int get_user_pages_fast_only(unsigned long start
, int nr_pages
,
3011 unsigned int gup_flags
, struct page
**pages
)
3015 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
3016 * because gup fast is always a "pin with a +1 page refcount" request.
3018 * FOLL_FAST_ONLY is required in order to match the API description of
3019 * this routine: no fall back to regular ("slow") GUP.
3021 gup_flags
|= FOLL_GET
| FOLL_FAST_ONLY
;
3023 nr_pinned
= internal_get_user_pages_fast(start
, nr_pages
, gup_flags
,
3027 * As specified in the API description above, this routine is not
3028 * allowed to return negative values. However, the common core
3029 * routine internal_get_user_pages_fast() *can* return -errno.
3030 * Therefore, correct for that here:
3037 EXPORT_SYMBOL_GPL(get_user_pages_fast_only
);
3040 * get_user_pages_fast() - pin user pages in memory
3041 * @start: starting user address
3042 * @nr_pages: number of pages from start to pin
3043 * @gup_flags: flags modifying pin behaviour
3044 * @pages: array that receives pointers to the pages pinned.
3045 * Should be at least nr_pages long.
3047 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3048 * If not successful, it will fall back to taking the lock and
3049 * calling get_user_pages().
3051 * Returns number of pages pinned. This may be fewer than the number requested.
3052 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3055 int get_user_pages_fast(unsigned long start
, int nr_pages
,
3056 unsigned int gup_flags
, struct page
**pages
)
3058 if (!is_valid_gup_flags(gup_flags
))
3062 * The caller may or may not have explicitly set FOLL_GET; either way is
3063 * OK. However, internally (within mm/gup.c), gup fast variants must set
3064 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3067 gup_flags
|= FOLL_GET
;
3068 return internal_get_user_pages_fast(start
, nr_pages
, gup_flags
, pages
);
3070 EXPORT_SYMBOL_GPL(get_user_pages_fast
);
3073 * pin_user_pages_fast() - pin user pages in memory without taking locks
3075 * @start: starting user address
3076 * @nr_pages: number of pages from start to pin
3077 * @gup_flags: flags modifying pin behaviour
3078 * @pages: array that receives pointers to the pages pinned.
3079 * Should be at least nr_pages long.
3081 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3082 * get_user_pages_fast() for documentation on the function arguments, because
3083 * the arguments here are identical.
3085 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3086 * see Documentation/core-api/pin_user_pages.rst for further details.
3088 int pin_user_pages_fast(unsigned long start
, int nr_pages
,
3089 unsigned int gup_flags
, struct page
**pages
)
3091 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3092 if (WARN_ON_ONCE(gup_flags
& FOLL_GET
))
3095 if (WARN_ON_ONCE(!pages
))
3098 gup_flags
|= FOLL_PIN
;
3099 return internal_get_user_pages_fast(start
, nr_pages
, gup_flags
, pages
);
3101 EXPORT_SYMBOL_GPL(pin_user_pages_fast
);
3104 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
3105 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
3107 * The API rules are the same, too: no negative values may be returned.
3109 int pin_user_pages_fast_only(unsigned long start
, int nr_pages
,
3110 unsigned int gup_flags
, struct page
**pages
)
3115 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
3116 * rules require returning 0, rather than -errno:
3118 if (WARN_ON_ONCE(gup_flags
& FOLL_GET
))
3121 if (WARN_ON_ONCE(!pages
))
3124 * FOLL_FAST_ONLY is required in order to match the API description of
3125 * this routine: no fall back to regular ("slow") GUP.
3127 gup_flags
|= (FOLL_PIN
| FOLL_FAST_ONLY
);
3128 nr_pinned
= internal_get_user_pages_fast(start
, nr_pages
, gup_flags
,
3131 * This routine is not allowed to return negative values. However,
3132 * internal_get_user_pages_fast() *can* return -errno. Therefore,
3133 * correct for that here:
3140 EXPORT_SYMBOL_GPL(pin_user_pages_fast_only
);
3143 * pin_user_pages_remote() - pin pages of a remote process
3145 * @mm: mm_struct of target mm
3146 * @start: starting user address
3147 * @nr_pages: number of pages from start to pin
3148 * @gup_flags: flags modifying lookup behaviour
3149 * @pages: array that receives pointers to the pages pinned.
3150 * Should be at least nr_pages long.
3151 * @vmas: array of pointers to vmas corresponding to each page.
3152 * Or NULL if the caller does not require them.
3153 * @locked: pointer to lock flag indicating whether lock is held and
3154 * subsequently whether VM_FAULT_RETRY functionality can be
3155 * utilised. Lock must initially be held.
3157 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3158 * get_user_pages_remote() for documentation on the function arguments, because
3159 * the arguments here are identical.
3161 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3162 * see Documentation/core-api/pin_user_pages.rst for details.
3164 long pin_user_pages_remote(struct mm_struct
*mm
,
3165 unsigned long start
, unsigned long nr_pages
,
3166 unsigned int gup_flags
, struct page
**pages
,
3167 struct vm_area_struct
**vmas
, int *locked
)
3169 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3170 if (WARN_ON_ONCE(gup_flags
& FOLL_GET
))
3173 if (WARN_ON_ONCE(!pages
))
3176 gup_flags
|= FOLL_PIN
;
3177 return __get_user_pages_remote(mm
, start
, nr_pages
, gup_flags
,
3178 pages
, vmas
, locked
);
3180 EXPORT_SYMBOL(pin_user_pages_remote
);
3183 * pin_user_pages() - pin user pages in memory for use by other devices
3185 * @start: starting user address
3186 * @nr_pages: number of pages from start to pin
3187 * @gup_flags: flags modifying lookup behaviour
3188 * @pages: array that receives pointers to the pages pinned.
3189 * Should be at least nr_pages long.
3190 * @vmas: array of pointers to vmas corresponding to each page.
3191 * Or NULL if the caller does not require them.
3193 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3196 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3197 * see Documentation/core-api/pin_user_pages.rst for details.
3199 long pin_user_pages(unsigned long start
, unsigned long nr_pages
,
3200 unsigned int gup_flags
, struct page
**pages
,
3201 struct vm_area_struct
**vmas
)
3203 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3204 if (WARN_ON_ONCE(gup_flags
& FOLL_GET
))
3207 if (WARN_ON_ONCE(!pages
))
3210 gup_flags
|= FOLL_PIN
;
3211 return __gup_longterm_locked(current
->mm
, start
, nr_pages
,
3212 pages
, vmas
, gup_flags
);
3214 EXPORT_SYMBOL(pin_user_pages
);
3217 * pin_user_pages_unlocked() is the FOLL_PIN variant of
3218 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3219 * FOLL_PIN and rejects FOLL_GET.
3221 long pin_user_pages_unlocked(unsigned long start
, unsigned long nr_pages
,
3222 struct page
**pages
, unsigned int gup_flags
)
3224 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3225 if (WARN_ON_ONCE(gup_flags
& FOLL_GET
))
3228 if (WARN_ON_ONCE(!pages
))
3231 gup_flags
|= FOLL_PIN
;
3232 return get_user_pages_unlocked(start
, nr_pages
, pages
, gup_flags
);
3234 EXPORT_SYMBOL(pin_user_pages_unlocked
);