// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static void hpage_pincount_add(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_add(refs, compound_pincount_ptr(page));
}

static void hpage_pincount_sub(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_sub(refs, compound_pincount_ptr(page));
}

/* Equivalent to calling put_page() @refs times. */
static void put_page_refs(struct page *page, int refs)
{
#ifdef CONFIG_DEBUG_VM
	if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
		return;
#endif

	/*
	 * Calling put_page() for each ref is unnecessarily slow. Only the last
	 * ref needs a put_page().
	 */
	if (refs > 1)
		page_ref_sub(page, refs - 1);
	put_page(page);
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);

	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the head page; but it
	 * could be that between the compound_head() lookup and the refcount
	 * increment, the compound page was split, in which case we'd end up
	 * holding a reference on a page that has nothing to do with the page
	 * we were given anymore.
	 * So now that the head page is stable, recheck that the pages still
	 * belong together.
	 */
	if (unlikely(compound_head(page) != head)) {
		put_page_refs(head, refs);
		return NULL;
	}

	return head;
}

/**
 * try_grab_compound_head() - attempt to elevate a page's refcount, by a
 * flags-dependent amount.
 *
 * Even though the name includes "compound_head", this function is still
 * appropriate for callers that have a non-compound @page to get.
 *
 * @page:  pointer to page to be grabbed
 * @refs:  the value to (effectively) add to the page's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 * FOLL_GET: page's refcount will be incremented by @refs.
 *
 * FOLL_PIN on compound pages that are > two pages long: page's refcount will
 * be incremented by @refs, and page[2].hpage_pinned_refcount will be
 * incremented by @refs * GUP_PIN_COUNTING_BIAS.
 *
 * FOLL_PIN on normal pages, or compound pages that are two pages long:
 * page's refcount will be incremented by @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: head page (with refcount appropriately incremented) for success, or
 * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
 * considered failure, and furthermore, a likely bug in the caller, so a warning
 * is also emitted.
 */
struct page *try_grab_compound_head(struct page *page,
				    int refs, unsigned int flags)
{
	if (flags & FOLL_GET)
		return try_get_compound_head(page, refs);
	else if (flags & FOLL_PIN) {
		/*
		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
		 * right zone, so fail and let the caller fall back to the slow
		 * path.
		 */
		if (unlikely((flags & FOLL_LONGTERM) &&
			     !is_pinnable_page(page)))
			return NULL;

		/*
		 * CAUTION: Don't use compound_head() on the page before this
		 * point, the result won't be stable.
		 */
		page = try_get_compound_head(page, refs);
		if (!page)
			return NULL;

		/*
		 * When pinning a compound page of order > 1 (which is what
		 * hpage_pincount_available() checks for), use an exact count to
		 * track it, via hpage_pincount_add/_sub().
		 *
		 * However, be sure to *also* increment the normal page refcount
		 * field at least once, so that the page really is pinned.
		 * That's why the refcount from the earlier
		 * try_get_compound_head() is left intact.
		 */
		if (hpage_pincount_available(page))
			hpage_pincount_add(page, refs);
		else
			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
				    refs);

		return page;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

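/*
 * Editorial note (not part of the original source): a worked example of the
 * accounting above, assuming GUP_PIN_COUNTING_BIAS is 1024 as defined in
 * include/linux/mm.h:
 *
 *   - FOLL_GET with refs == 1: page refcount += 1.
 *   - FOLL_PIN on a normal (order-0 or order-1) page with refs == 1:
 *     page refcount += 1 * 1024.
 *   - FOLL_PIN on a THP (order > 1) with refs == 512: page refcount += 512
 *     (from try_get_compound_head()) and page[2].hpage_pinned_refcount += 512.
 *
 * unpin_user_page() undoes exactly one FOLL_PIN unit of the above.
 */
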
static void put_compound_head(struct page *page, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
				    refs);

		if (hpage_pincount_available(page))
			hpage_pincount_sub(page, refs);
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	put_page_refs(page, refs);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
 *
 * @page:  pointer to page to be grabbed
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases: please see the try_grab_compound_head() documentation, with
 * "refs=1".
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return true;

	return try_grab_compound_head(page, 1, flags);
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page:            pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	put_compound_head(compound_head(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);

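/*
 * Editorial example (a sketch, not part of the original file): a typical
 * caller pins pages with one of the pin_user_pages*() entry points and
 * releases every page with unpin_user_page(), e.g.:
 *
 *	nr = pin_user_pages_fast(user_addr, npages, FOLL_WRITE, pages);
 *	if (nr <= 0)
 *		return nr ? nr : -EFAULT;
 *	... access the pages ...
 *	for (i = 0; i < nr; i++)
 *		unpin_user_page(pages[i]);
 *
 * Mixing pin_user_pages*() with put_page(), or get_user_pages*() with
 * unpin_user_page(), would corrupt the pin accounting described above.
 */
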
static inline void compound_range_next(unsigned long i, unsigned long npages,
				       struct page **list, struct page **head,
				       unsigned int *ntails)
{
	struct page *next, *page;
	unsigned int nr = 1;

	if (i >= npages)
		return;

	next = *list + i;
	page = compound_head(next);
	if (PageCompound(page) && compound_order(page) >= 1)
		nr = min_t(unsigned int,
			   page + compound_nr(page) - next, npages - i);

	*head = page;
	*ntails = nr;
}

#define for_each_compound_range(__i, __list, __npages, __head, __ntails) \
	for (__i = 0, \
	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \
	     __i < __npages; __i += __ntails, \
	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))

static inline void compound_next(unsigned long i, unsigned long npages,
				 struct page **list, struct page **head,
				 unsigned int *ntails)
{
	struct page *page;
	unsigned int nr;

	if (i >= npages)
		return;

	page = compound_head(list[i]);
	for (nr = i + 1; nr < npages; nr++) {
		if (compound_head(list[nr]) != page)
			break;
	}

	*head = page;
	*ntails = nr - i;
}

#define for_each_compound_head(__i, __list, __npages, __head, __ntails) \
	for (__i = 0, \
	     compound_next(__i, __npages, __list, &(__head), &(__ntails)); \
	     __i < __npages; __i += __ntails, \
	     compound_next(__i, __npages, __list, &(__head), &(__ntails)))

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long index;
	struct page *head;
	unsigned int ntails;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	for_each_compound_head(index, pages, npages, head, ntails) {
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!PageDirty(head))
			set_page_dirty_lock(head);
		put_compound_head(head, ntails, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);

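/*
 * Editorial example (a sketch, not part of the original file): a driver that
 * pinned pages as a DMA target typically releases them with the helper above
 * once the device has finished writing into them, roughly:
 *
 *	nr = pin_user_pages_fast(start, npages, FOLL_WRITE | FOLL_LONGTERM,
 *				 pages);
 *	... program the device, wait for the DMA write to complete ...
 *	unpin_user_pages_dirty_lock(pages, nr, true);
 *
 * Passing make_dirty == false (or calling plain unpin_user_pages()) on a
 * file-backed mapping would risk losing the DMA'd data, because writeback
 * can skip pages that were never marked dirty.
 */
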
/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty)
{
	unsigned long index;
	struct page *head;
	unsigned int ntails;

	for_each_compound_range(index, &page, npages, head, ntails) {
		if (make_dirty && !PageDirty(head))
			set_page_dirty_lock(head);
		put_compound_head(head, ntails, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

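/*
 * Editorial note (not part of the original source): the _range variant above
 * suits callers that pinned a physically consecutive chunk and only kept the
 * first struct page plus a length, e.g. roughly:
 *
 *	unpin_user_page_range_dirty_lock(first_page, npages, true);
 *
 * whereas unpin_user_pages_dirty_lock() walks an array with one entry per
 * pinned page.
 */
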
/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;
	struct page *head;
	unsigned int ntails;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;

	for_each_compound_head(index, pages, npages, head, ntails)
		put_compound_head(head, ntails, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_pages);

/*
 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
 * cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
	if (!test_bit(MMF_HAS_PINNED, mm_flags))
		set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

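/*
 * Editorial note (not part of the original source): the FOLL_COW rule above
 * is what lets e.g. ptrace poke a breakpoint into a read-only private
 * mapping. The first follow_page_pte() attempt with FOLL_WRITE fails, the
 * caller breaks COW via faultin_page() (which sets FOLL_COW when
 * VM_FAULT_WRITE is reported on a !VM_WRITE vma), and the retried lookup
 * then succeeds because the new anonymous pte is dirty.
 */
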
static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);
retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case).  Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	 /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_lock is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT_PMD) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	if (vma_is_secretmem(vma))
		return NULL;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}

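/*
 * Editorial example (a sketch, not part of the original file): in-kernel
 * callers use follow_page() for a one-off lookup while holding mmap_lock,
 * e.g. something along the lines of:
 *
 *	mmap_read_lock(mm);
 *	page = follow_page(vma, addr, FOLL_GET);
 *	if (!IS_ERR_OR_NULL(page)) {
 *		... inspect the page ...
 *		put_page(page);
 *	}
 *	mmap_read_unlock(mm);
 *
 * With FOLL_GET the caller owns a reference and must drop it with put_page().
 */
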
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	if (unlikely(!try_grab_page(*page, gup_flags))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_lock must be held on entry.  If @locked != NULL and *@flags
 * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
 * is, *@locked will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (locked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags, NULL);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
		return -EOPNOTSUPP;

	if (vma_is_secretmem(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_lock is held.
 *
 * Must be called with mmap_lock held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
 * released by an up_read().  That can happen if @gup_flags does not
 * have FOLL_NOWAIT.
 *
 * A caller using such a combination of @locked and @gup_flags
 * must therefore hold the mmap_lock for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr(start);

	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;

			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, locked);
				if (locked && *locked == 0) {
					/*
					 * We've got a VM_FAULT_RETRY
					 * and we've lost mmap_lock.
					 * We must stop here.
					 */
					BUG_ON(gup_flags & FOLL_NOWAIT);
					goto out;
				}
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page) {
			ret = faultin_page(vma, start, &foll_flags, locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
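		/*
		 * Editorial note (not part of the original source): page_mask
		 * is 0 for normal pages, so page_increm below is 1. For a
		 * PMD-sized THP, page_mask is HPAGE_PMD_NR - 1 (511 with 4K
		 * pages) and the expression yields the number of subpages
		 * left between start and the end of the huge page, so a
		 * single page-table walk can fill many array slots at once.
		 */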
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
 *		does not allow retry. If NULL, the caller must guarantee
 *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it has not the
 * same semantics wrt the @mm->mmap_lock as does filemap_fault().
 */
int fixup_user_fault(struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret;

	address = untagged_addr(address);

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
	    fatal_signal_pending(current))
		return -EINTR;

	ret = handle_mm_fault(vma, address, fault_flags, NULL);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		mmap_read_lock(mm);
		*unlocked = true;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

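/*
 * Editorial example (a sketch, not part of the original file): the futex code
 * uses this roughly as follows, after a user access inside a
 * pagefault_disable() region has failed:
 *
 *	mmap_read_lock(mm);
 *	ret = fixup_user_fault(mm, (unsigned long)uaddr, FAULT_FLAG_WRITE,
 *			       &unlocked);
 *	mmap_read_unlock(mm);
 *	if (!ret)
 *		... retry the faulting user access ...
 *
 * If @unlocked comes back true, the mmap_lock was dropped and re-taken, so
 * any VMA pointers cached across the call are stale.
 */
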
/*
 * Please note that this function, unlike __get_user_pages(), will not
 * return 0 for nr_pages > 0 without FOLL_NOWAIT
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (flags & FOLL_PIN)
		mm_set_has_pinned_flag(&mm->flags);

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;
		lock_dropped = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
		 * by fatal signals, so we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */

		if (fatal_signal_pending(current)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}
	return pages_done;
}

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma_is_accessible(vma))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(mm, start, nr_pages, gup_flags,
				NULL, NULL, locked);
}

/*
 * faultin_vma_page_range() - populate (prefault) page tables inside the
 *			      given VMA range readable/writable
 *
 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
 *
 * @vma: target vma
 * @start: start address
 * @end: end address
 * @write: whether to prefault readable or writable
 * @locked: whether the mmap_lock is still held
 *
 * Returns either number of processed pages in the vma, or a negative error
 * code on error (see __get_user_pages()).
 *
 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
 * covered by the VMA.
 *
 * If @locked is NULL, it may be held for read or write and will be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be released. If
 * it's released, *@locked will be set to 0.
 */
long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, bool write, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	/*
	 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
	 *	       the page dirty with FOLL_WRITE -- which doesn't make a
	 *	       difference with !FOLL_FORCE, because the page is writable
	 *	       in the page table.
	 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
	 *		  a poisoned page.
	 * FOLL_POPULATE: Always populate memory with VM_LOCKONFAULT.
	 * !FOLL_FORCE: Require proper access permissions.
	 */
	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK | FOLL_HWPOISON;
	if (write)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want to report -EINVAL instead of -EFAULT for any permission
	 * problems or incompatible mappings.
	 */
	if (check_vma_flags(vma, gup_flags))
		return -EINVAL;

	return __get_user_pages(mm, start, nr_pages, gup_flags,
				NULL, NULL, locked);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_lock must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			mmap_read_lock(mm);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		mmap_read_unlock(mm);
	return ret;	/* 0 or negative error code */
}
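
/*
 * Editorial note (not part of the original source): mlock(2) and mmap(2) with
 * MAP_POPULATE / MAP_LOCKED end up here. For example, the mm_populate()
 * helper in <linux/mm.h> is (roughly) a wrapper that does:
 *
 *	(void)__mm_populate(addr, len, 1);
 *
 * i.e. prefault the freshly mapped range and ignore individual errors.
 */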
#else /* CONFIG_MMU */
static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
		unsigned long nr_pages, struct page **pages,
		struct vm_area_struct **vmas, int *locked,
		unsigned int foll_flags)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	long i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				get_page(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
#endif /* !CONFIG_MMU */
d3649f68 1658
8f942eea
JH
1659/**
1660 * get_dump_page() - pin user page in memory while writing it to core dump
1661 * @addr: user address
1662 *
1663 * Returns struct page pointer of user page pinned for dump,
1664 * to be freed afterwards by put_page().
1665 *
1666 * Returns NULL on any kind of failure - a hole must then be inserted into
1667 * the corefile, to preserve alignment with its headers; and also returns
1668 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
f0953a1b 1669 * allowing a hole to be left in the corefile to save disk space.
8f942eea 1670 *
7f3bfab5 1671 * Called without mmap_lock (takes and releases the mmap_lock by itself).
8f942eea
JH
1672 */
1673#ifdef CONFIG_ELF_CORE
1674struct page *get_dump_page(unsigned long addr)
1675{
7f3bfab5 1676 struct mm_struct *mm = current->mm;
8f942eea 1677 struct page *page;
7f3bfab5
JH
1678 int locked = 1;
1679 int ret;
8f942eea 1680
7f3bfab5 1681 if (mmap_read_lock_killable(mm))
8f942eea 1682 return NULL;
7f3bfab5
JH
1683 ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1684 FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1685 if (locked)
1686 mmap_read_unlock(mm);
1687 return (ret == 1) ? page : NULL;
8f942eea
JH
1688}
1689#endif /* CONFIG_ELF_CORE */
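To make the calling convention above concrete, here is a hedged sketch of a coredump-style user of get_dump_page(); the helper name and the copy into a kernel buffer are illustrative, not the actual fs/coredump.c logic:

	#include <linux/mm.h>
	#include <linux/highmem.h>

	/* Copy one user page into @buf, or report a hole for the corefile. */
	static int example_dump_user_page(unsigned long addr, void *buf)
	{
		struct page *page = get_dump_page(addr);
		void *kaddr;

		if (!page)
			return 0;		/* caller emits a hole instead */

		kaddr = kmap_local_page(page);
		memcpy(buf, kaddr, PAGE_SIZE);
		kunmap_local(kaddr);
		put_page(page);			/* get_dump_page() took a reference */
		return 1;
	}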
1690
d1e153fe 1691#ifdef CONFIG_MIGRATION
f68749ec
PT
1692/*
 1693 * Check whether all pages are pinnable; if so, return the number of pages.
 1694 * If some pages are not pinnable, migrate them and unpin all of the pages.
 1695 * Return zero if pages were migrated or if some pages could not be isolated.
 1696 * Return a negative error if migration fails.
1697 */
1698static long check_and_migrate_movable_pages(unsigned long nr_pages,
d1e153fe 1699 struct page **pages,
d1e153fe 1700 unsigned int gup_flags)
9a4e9f3b 1701{
f68749ec
PT
1702 unsigned long i;
1703 unsigned long isolation_error_count = 0;
1704 bool drain_allow = true;
d1e153fe 1705 LIST_HEAD(movable_page_list);
f68749ec
PT
1706 long ret = 0;
1707 struct page *prev_head = NULL;
1708 struct page *head;
ed03d924
JK
1709 struct migration_target_control mtc = {
1710 .nid = NUMA_NO_NODE,
c991ffef 1711 .gfp_mask = GFP_USER | __GFP_NOWARN,
ed03d924 1712 };
9a4e9f3b 1713
83c02c23
PT
1714 for (i = 0; i < nr_pages; i++) {
1715 head = compound_head(pages[i]);
1716 if (head == prev_head)
1717 continue;
1718 prev_head = head;
9a4e9f3b 1719 /*
d1e153fe
PT
1720 * If we get a movable page, since we are going to be pinning
1721 * these entries, try to move them out if possible.
9a4e9f3b 1722 */
d1e153fe 1723 if (!is_pinnable_page(head)) {
6e7f34eb 1724 if (PageHuge(head)) {
d1e153fe 1725 if (!isolate_huge_page(head, &movable_page_list))
6e7f34eb
PT
1726 isolation_error_count++;
1727 } else {
9a4e9f3b
AK
1728 if (!PageLRU(head) && drain_allow) {
1729 lru_add_drain_all();
1730 drain_allow = false;
1731 }
1732
6e7f34eb
PT
1733 if (isolate_lru_page(head)) {
1734 isolation_error_count++;
1735 continue;
9a4e9f3b 1736 }
d1e153fe 1737 list_add_tail(&head->lru, &movable_page_list);
6e7f34eb
PT
1738 mod_node_page_state(page_pgdat(head),
1739 NR_ISOLATED_ANON +
1740 page_is_file_lru(head),
1741 thp_nr_pages(head));
9a4e9f3b
AK
1742 }
1743 }
1744 }
1745
6e7f34eb
PT
1746 /*
 1747 * If the list is empty and there were no isolation errors, all pages are
 1748 * in the correct zone.
1749 */
d1e153fe 1750 if (list_empty(&movable_page_list) && !isolation_error_count)
f68749ec 1751 return nr_pages;
6e7f34eb 1752
f68749ec
PT
1753 if (gup_flags & FOLL_PIN) {
1754 unpin_user_pages(pages, nr_pages);
1755 } else {
1756 for (i = 0; i < nr_pages; i++)
1757 put_page(pages[i]);
1758 }
d1e153fe 1759 if (!list_empty(&movable_page_list)) {
d1e153fe 1760 ret = migrate_pages(&movable_page_list, alloc_migration_target,
f0f44638 1761 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
5ac95884 1762 MR_LONGTERM_PIN, NULL);
f68749ec
PT
1763 if (ret && !list_empty(&movable_page_list))
1764 putback_movable_pages(&movable_page_list);
9a4e9f3b
AK
1765 }
1766
f68749ec 1767 return ret > 0 ? -ENOMEM : ret;
9a4e9f3b
AK
1768}
1769#else
f68749ec 1770static long check_and_migrate_movable_pages(unsigned long nr_pages,
d1e153fe 1771 struct page **pages,
d1e153fe 1772 unsigned int gup_flags)
9a4e9f3b
AK
1773{
1774 return nr_pages;
1775}
d1e153fe 1776#endif /* CONFIG_MIGRATION */
9a4e9f3b 1777
2bb6d283 1778/*
932f4a63
IW
1779 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1780 * allows us to process the FOLL_LONGTERM flag.
2bb6d283 1781 */
64019a2e 1782static long __gup_longterm_locked(struct mm_struct *mm,
932f4a63
IW
1783 unsigned long start,
1784 unsigned long nr_pages,
1785 struct page **pages,
1786 struct vm_area_struct **vmas,
1787 unsigned int gup_flags)
2bb6d283 1788{
f68749ec 1789 unsigned int flags;
52650c8b 1790 long rc;
2bb6d283 1791
f68749ec
PT
1792 if (!(gup_flags & FOLL_LONGTERM))
1793 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1794 NULL, gup_flags);
1795 flags = memalloc_pin_save();
1796 do {
1797 rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1798 NULL, gup_flags);
1799 if (rc <= 0)
1800 break;
1801 rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
1802 } while (!rc);
1803 memalloc_pin_restore(flags);
2bb6d283 1804
2bb6d283
DW
1805 return rc;
1806}
932f4a63 1807
447f3e45
BS
1808static bool is_valid_gup_flags(unsigned int gup_flags)
1809{
1810 /*
1811 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1812 * never directly by the caller, so enforce that with an assertion:
1813 */
1814 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1815 return false;
1816 /*
1817 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
 1818 * that is, FOLL_LONGTERM is a specific, more restrictive case of
1819 * FOLL_PIN.
1820 */
1821 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1822 return false;
1823
1824 return true;
1825}
1826
22bf29b6 1827#ifdef CONFIG_MMU
64019a2e 1828static long __get_user_pages_remote(struct mm_struct *mm,
22bf29b6
JH
1829 unsigned long start, unsigned long nr_pages,
1830 unsigned int gup_flags, struct page **pages,
1831 struct vm_area_struct **vmas, int *locked)
1832{
1833 /*
1834 * Parts of FOLL_LONGTERM behavior are incompatible with
1835 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1836 * vmas. However, this only comes up if locked is set, and there are
1837 * callers that do request FOLL_LONGTERM, but do not set locked. So,
1838 * allow what we can.
1839 */
1840 if (gup_flags & FOLL_LONGTERM) {
1841 if (WARN_ON_ONCE(locked))
1842 return -EINVAL;
1843 /*
1844 * This will check the vmas (even if our vmas arg is NULL)
1845 * and return -ENOTSUPP if DAX isn't allowed in this case:
1846 */
64019a2e 1847 return __gup_longterm_locked(mm, start, nr_pages, pages,
22bf29b6
JH
1848 vmas, gup_flags | FOLL_TOUCH |
1849 FOLL_REMOTE);
1850 }
1851
64019a2e 1852 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
22bf29b6
JH
1853 locked,
1854 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1855}
1856
adc8cb40 1857/**
c4237f8b 1858 * get_user_pages_remote() - pin user pages in memory
c4237f8b
JH
1859 * @mm: mm_struct of target mm
1860 * @start: starting user address
1861 * @nr_pages: number of pages from start to pin
1862 * @gup_flags: flags modifying lookup behaviour
1863 * @pages: array that receives pointers to the pages pinned.
1864 * Should be at least nr_pages long. Or NULL, if caller
1865 * only intends to ensure the pages are faulted in.
1866 * @vmas: array of pointers to vmas corresponding to each page.
1867 * Or NULL if the caller does not require them.
1868 * @locked: pointer to lock flag indicating whether lock is held and
1869 * subsequently whether VM_FAULT_RETRY functionality can be
1870 * utilised. Lock must initially be held.
1871 *
1872 * Returns either number of pages pinned (which may be less than the
1873 * number requested), or an error. Details about the return value:
1874 *
1875 * -- If nr_pages is 0, returns 0.
1876 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1877 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1878 * pages pinned. Again, this may be less than nr_pages.
1879 *
1880 * The caller is responsible for releasing returned @pages, via put_page().
1881 *
c1e8d7c6 1882 * @vmas are valid only as long as mmap_lock is held.
c4237f8b 1883 *
c1e8d7c6 1884 * Must be called with mmap_lock held for read or write.
c4237f8b 1885 *
adc8cb40
SJ
1886 * get_user_pages_remote walks a process's page tables and takes a reference
1887 * to each struct page that each user address corresponds to at a given
c4237f8b
JH
1888 * instant. That is, it takes the page that would be accessed if a user
1889 * thread accesses the given user virtual address at that instant.
1890 *
1891 * This does not guarantee that the page exists in the user mappings when
adc8cb40 1892 * get_user_pages_remote returns, and there may even be a completely different
c4237f8b
JH
1893 * page there in some cases (eg. if mmapped pagecache has been invalidated
 1894 * and subsequently re-faulted). However, it does guarantee that the page
1895 * won't be freed completely. And mostly callers simply care that the page
1896 * contains data that was valid *at some point in time*. Typically, an IO
1897 * or similar operation cannot guarantee anything stronger anyway because
1898 * locks can't be held over the syscall boundary.
1899 *
1900 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1901 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1902 * be called after the page is finished with, and before put_page is called.
1903 *
adc8cb40
SJ
1904 * get_user_pages_remote is typically used for fewer-copy IO operations,
1905 * to get a handle on the memory by some means other than accesses
1906 * via the user virtual addresses. The pages may be submitted for
1907 * DMA to devices or accessed via their kernel linear mapping (via the
1908 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
c4237f8b
JH
1909 *
1910 * See also get_user_pages_fast, for performance critical applications.
1911 *
adc8cb40 1912 * get_user_pages_remote should be phased out in favor of
c4237f8b 1913 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
adc8cb40 1914 * should use get_user_pages_remote because it cannot pass
c4237f8b
JH
1915 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
1916 */
64019a2e 1917long get_user_pages_remote(struct mm_struct *mm,
c4237f8b
JH
1918 unsigned long start, unsigned long nr_pages,
1919 unsigned int gup_flags, struct page **pages,
1920 struct vm_area_struct **vmas, int *locked)
1921{
447f3e45 1922 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
1923 return -EINVAL;
1924
64019a2e 1925 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
22bf29b6 1926 pages, vmas, locked);
c4237f8b
JH
1927}
1928EXPORT_SYMBOL(get_user_pages_remote);
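A minimal sketch of the remote-GUP calling convention spelled out in the comment above (ptrace-style access to another task's memory); the helper name, single-page transfer, and use of FOLL_FORCE are illustrative assumptions, not part of gup.c:

	#include <linux/mm.h>
	#include <linux/highmem.h>

	/* Read one page from another process's address space into @buf. */
	static long example_read_remote_page(struct mm_struct *mm,
					     unsigned long uaddr, void *buf)
	{
		struct page *page;
		void *kaddr;
		long got;

		mmap_read_lock(mm);		/* mmap_lock must be held */
		got = get_user_pages_remote(mm, uaddr & PAGE_MASK, 1, FOLL_FORCE,
					    &page, NULL, NULL);
		mmap_read_unlock(mm);
		if (got != 1)
			return got < 0 ? got : -EFAULT;

		kaddr = kmap_local_page(page);
		memcpy(buf, kaddr, PAGE_SIZE);
		kunmap_local(kaddr);
		put_page(page);			/* release the FOLL_GET reference */
		return PAGE_SIZE;
	}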
1929
eddb1c22 1930#else /* CONFIG_MMU */
64019a2e 1931long get_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
1932 unsigned long start, unsigned long nr_pages,
1933 unsigned int gup_flags, struct page **pages,
1934 struct vm_area_struct **vmas, int *locked)
1935{
1936 return 0;
1937}
3faa52c0 1938
64019a2e 1939static long __get_user_pages_remote(struct mm_struct *mm,
3faa52c0
JH
1940 unsigned long start, unsigned long nr_pages,
1941 unsigned int gup_flags, struct page **pages,
1942 struct vm_area_struct **vmas, int *locked)
1943{
1944 return 0;
1945}
eddb1c22
JH
1946#endif /* !CONFIG_MMU */
1947
adc8cb40
SJ
1948/**
1949 * get_user_pages() - pin user pages in memory
1950 * @start: starting user address
1951 * @nr_pages: number of pages from start to pin
1952 * @gup_flags: flags modifying lookup behaviour
1953 * @pages: array that receives pointers to the pages pinned.
1954 * Should be at least nr_pages long. Or NULL, if caller
1955 * only intends to ensure the pages are faulted in.
1956 * @vmas: array of pointers to vmas corresponding to each page.
1957 * Or NULL if the caller does not require them.
1958 *
64019a2e
PX
1959 * This is the same as get_user_pages_remote(), just with a less-flexible
1960 * calling convention where we assume that the mm being operated on belongs to
1961 * the current task, and doesn't allow passing of a locked parameter. We also
1962 * obviously don't pass FOLL_REMOTE in here.
932f4a63
IW
1963 */
1964long get_user_pages(unsigned long start, unsigned long nr_pages,
1965 unsigned int gup_flags, struct page **pages,
1966 struct vm_area_struct **vmas)
1967{
447f3e45 1968 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
1969 return -EINVAL;
1970
64019a2e 1971 return __gup_longterm_locked(current->mm, start, nr_pages,
932f4a63
IW
1972 pages, vmas, gup_flags | FOLL_TOUCH);
1973}
1974EXPORT_SYMBOL(get_user_pages);
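The same pattern against current->mm, as a hedged sketch; the helper name and one-page request are illustrative. Unlike the fast variants further below, the caller is expected to hold mmap_lock around the call:

	#include <linux/mm.h>

	/* Fault in and take a reference on one page of the current task. */
	static struct page *example_gup_current(unsigned long uaddr, bool write)
	{
		struct page *page;
		long got;

		mmap_read_lock(current->mm);
		got = get_user_pages(uaddr & PAGE_MASK, 1,
				     write ? FOLL_WRITE : 0, &page, NULL);
		mmap_read_unlock(current->mm);

		return got == 1 ? page : NULL;	/* release with put_page() */
	}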
2bb6d283 1975
adc8cb40 1976/**
a00cda3f
MCC
1977 * get_user_pages_locked() - variant of get_user_pages()
1978 *
1979 * @start: starting user address
1980 * @nr_pages: number of pages from start to pin
1981 * @gup_flags: flags modifying lookup behaviour
1982 * @pages: array that receives pointers to the pages pinned.
1983 * Should be at least nr_pages long. Or NULL, if caller
1984 * only intends to ensure the pages are faulted in.
1985 * @locked: pointer to lock flag indicating whether lock is held and
1986 * subsequently whether VM_FAULT_RETRY functionality can be
1987 * utilised. Lock must initially be held.
1988 *
1989 * It is suitable to replace the form:
acc3c8d1 1990 *
3e4e28c5 1991 * mmap_read_lock(mm);
d3649f68 1992 * do_something()
64019a2e 1993 * get_user_pages(mm, ..., pages, NULL);
3e4e28c5 1994 * mmap_read_unlock(mm);
acc3c8d1 1995 *
d3649f68 1996 * to:
acc3c8d1 1997 *
d3649f68 1998 * int locked = 1;
3e4e28c5 1999 * mmap_read_lock(mm);
d3649f68 2000 * do_something()
64019a2e 2001 * get_user_pages_locked(mm, ..., pages, &locked);
d3649f68 2002 * if (locked)
3e4e28c5 2003 * mmap_read_unlock(mm);
adc8cb40 2004 *
adc8cb40
SJ
2005 * We can leverage the VM_FAULT_RETRY functionality in the page fault
2006 * paths better by using either get_user_pages_locked() or
2007 * get_user_pages_unlocked().
2008 *
acc3c8d1 2009 */
d3649f68
CH
2010long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
2011 unsigned int gup_flags, struct page **pages,
2012 int *locked)
acc3c8d1 2013{
acc3c8d1 2014 /*
d3649f68
CH
2015 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2016 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2017 * vmas. As there are no users of this flag in this call we simply
2018 * disallow this option for now.
acc3c8d1 2019 */
d3649f68
CH
2020 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2021 return -EINVAL;
420c2091
JH
2022 /*
2023 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2024 * never directly by the caller, so enforce that:
2025 */
2026 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2027 return -EINVAL;
acc3c8d1 2028
64019a2e 2029 return __get_user_pages_locked(current->mm, start, nr_pages,
d3649f68
CH
2030 pages, NULL, locked,
2031 gup_flags | FOLL_TOUCH);
acc3c8d1 2032}
d3649f68 2033EXPORT_SYMBOL(get_user_pages_locked);
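A concrete version of the replacement form shown in the comment above, with the lock flag handled explicitly; the helper name and FOLL_WRITE choice are illustrative assumptions:

	#include <linux/mm.h>

	/* Pin @nr pages, tolerating a VM_FAULT_RETRY that drops mmap_lock. */
	static long example_gup_locked(unsigned long uaddr, struct page **pages,
				       unsigned long nr)
	{
		int locked = 1;
		long got;

		mmap_read_lock(current->mm);
		got = get_user_pages_locked(uaddr & PAGE_MASK, nr, FOLL_WRITE,
					    pages, &locked);
		if (locked)
			mmap_read_unlock(current->mm);
		/* If !locked, the lock was already dropped during a retried fault. */
		return got;
	}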
acc3c8d1
KS
2034
2035/*
d3649f68 2036 * get_user_pages_unlocked() is suitable to replace the form:
acc3c8d1 2037 *
3e4e28c5 2038 * mmap_read_lock(mm);
64019a2e 2039 * get_user_pages(mm, ..., pages, NULL);
3e4e28c5 2040 * mmap_read_unlock(mm);
d3649f68
CH
2041 *
2042 * with:
2043 *
64019a2e 2044 * get_user_pages_unlocked(mm, ..., pages);
d3649f68
CH
2045 *
2046 * It is functionally equivalent to get_user_pages_fast so
2047 * get_user_pages_fast should be used instead if specific gup_flags
2048 * (e.g. FOLL_FORCE) are not required.
acc3c8d1 2049 */
d3649f68
CH
2050long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2051 struct page **pages, unsigned int gup_flags)
acc3c8d1
KS
2052{
2053 struct mm_struct *mm = current->mm;
d3649f68
CH
2054 int locked = 1;
2055 long ret;
acc3c8d1 2056
d3649f68
CH
2057 /*
2058 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2059 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2060 * vmas. As there are no users of this flag in this call we simply
2061 * disallow this option for now.
2062 */
2063 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2064 return -EINVAL;
acc3c8d1 2065
d8ed45c5 2066 mmap_read_lock(mm);
64019a2e 2067 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
d3649f68 2068 &locked, gup_flags | FOLL_TOUCH);
acc3c8d1 2069 if (locked)
d8ed45c5 2070 mmap_read_unlock(mm);
d3649f68 2071 return ret;
4bbd4c77 2072}
d3649f68 2073EXPORT_SYMBOL(get_user_pages_unlocked);
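And the unlocked form, which needs no lock juggling at all; note that the argument order differs from get_user_pages_locked() (pages before gup_flags). The helper name is illustrative:

	#include <linux/mm.h>

	/* Let the helper take and drop mmap_lock itself. */
	static long example_gup_unlocked(unsigned long uaddr, struct page **pages,
					 unsigned long nr)
	{
		return get_user_pages_unlocked(uaddr & PAGE_MASK, nr, pages,
					       FOLL_WRITE);
	}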
2667f50e
SC
2074
2075/*
67a929e0 2076 * Fast GUP
2667f50e
SC
2077 *
2078 * get_user_pages_fast attempts to pin user pages by walking the page
2079 * tables directly and avoids taking locks. Thus the walker needs to be
2080 * protected from page table pages being freed from under it, and should
2081 * block any THP splits.
2082 *
2083 * One way to achieve this is to have the walker disable interrupts, and
2084 * rely on IPIs from the TLB flushing code blocking before the page table
2085 * pages are freed. This is unsuitable for architectures that do not need
2086 * to broadcast an IPI when invalidating TLBs.
2087 *
2088 * Another way to achieve this is to batch up page table containing pages
2089 * belonging to more than one mm_user, then rcu_sched a callback to free those
 2090 * pages. Disabling interrupts will allow the fast_gup walker to block both
 2091 * the rcu_sched callback and an IPI that we broadcast for splitting THPs
2092 * (which is a relatively rare event). The code below adopts this strategy.
2093 *
2094 * Before activating this code, please be aware that the following assumptions
2095 * are currently made:
2096 *
ff2e6d72 2097 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
e585513b 2098 * free pages containing page tables, or TLB flushing requires IPI broadcast.
2667f50e 2099 *
2667f50e
SC
2100 * *) ptes can be read atomically by the architecture.
2101 *
2102 * *) access_ok is sufficient to validate userspace address ranges.
2103 *
2104 * The last two assumptions can be relaxed by the addition of helper functions.
2105 *
2106 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2107 */
67a929e0 2108#ifdef CONFIG_HAVE_FAST_GUP
3faa52c0 2109
790c7369 2110static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
3b78d834 2111 unsigned int flags,
790c7369 2112 struct page **pages)
b59f65fa
KS
2113{
2114 while ((*nr) - nr_start) {
2115 struct page *page = pages[--(*nr)];
2116
2117 ClearPageReferenced(page);
3faa52c0
JH
2118 if (flags & FOLL_PIN)
2119 unpin_user_page(page);
2120 else
2121 put_page(page);
b59f65fa
KS
2122 }
2123}
2124
3010a5ea 2125#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2667f50e 2126static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
b798bec4 2127 unsigned int flags, struct page **pages, int *nr)
2667f50e 2128{
b59f65fa
KS
2129 struct dev_pagemap *pgmap = NULL;
2130 int nr_start = *nr, ret = 0;
2667f50e 2131 pte_t *ptep, *ptem;
2667f50e
SC
2132
2133 ptem = ptep = pte_offset_map(&pmd, addr);
2134 do {
2a4a06da 2135 pte_t pte = ptep_get_lockless(ptep);
7aef4172 2136 struct page *head, *page;
2667f50e
SC
2137
2138 /*
2139 * Similar to the PMD case below, NUMA hinting must take slow
8a0516ed 2140 * path using the pte_protnone check.
2667f50e 2141 */
e7884f8e
KS
2142 if (pte_protnone(pte))
2143 goto pte_unmap;
2144
b798bec4 2145 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
e7884f8e
KS
2146 goto pte_unmap;
2147
b59f65fa 2148 if (pte_devmap(pte)) {
7af75561
IW
2149 if (unlikely(flags & FOLL_LONGTERM))
2150 goto pte_unmap;
2151
b59f65fa
KS
2152 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2153 if (unlikely(!pgmap)) {
3b78d834 2154 undo_dev_pagemap(nr, nr_start, flags, pages);
b59f65fa
KS
2155 goto pte_unmap;
2156 }
2157 } else if (pte_special(pte))
2667f50e
SC
2158 goto pte_unmap;
2159
2160 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2161 page = pte_page(pte);
2162
3faa52c0 2163 head = try_grab_compound_head(page, 1, flags);
8fde12ca 2164 if (!head)
2667f50e
SC
2165 goto pte_unmap;
2166
1507f512
MR
2167 if (unlikely(page_is_secretmem(page))) {
2168 put_compound_head(head, 1, flags);
2169 goto pte_unmap;
2170 }
2171
2667f50e 2172 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
3faa52c0 2173 put_compound_head(head, 1, flags);
2667f50e
SC
2174 goto pte_unmap;
2175 }
2176
7aef4172 2177 VM_BUG_ON_PAGE(compound_head(page) != head, page);
e9348053 2178
f28d4363
CI
2179 /*
2180 * We need to make the page accessible if and only if we are
2181 * going to access its content (the FOLL_PIN case). Please
2182 * see Documentation/core-api/pin_user_pages.rst for
2183 * details.
2184 */
2185 if (flags & FOLL_PIN) {
2186 ret = arch_make_page_accessible(page);
2187 if (ret) {
2188 unpin_user_page(page);
2189 goto pte_unmap;
2190 }
2191 }
e9348053 2192 SetPageReferenced(page);
2667f50e
SC
2193 pages[*nr] = page;
2194 (*nr)++;
2195
2196 } while (ptep++, addr += PAGE_SIZE, addr != end);
2197
2198 ret = 1;
2199
2200pte_unmap:
832d7aa0
CH
2201 if (pgmap)
2202 put_dev_pagemap(pgmap);
2667f50e
SC
2203 pte_unmap(ptem);
2204 return ret;
2205}
2206#else
2207
2208/*
2209 * If we can't determine whether or not a pte is special, then fail immediately
2210 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2211 * to be special.
2212 *
2213 * For a futex to be placed on a THP tail page, get_futex_key requires a
dadbb612 2214 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2667f50e
SC
2215 * useful to have gup_huge_pmd even if we can't operate on ptes.
2216 */
2217static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
b798bec4 2218 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2219{
2220 return 0;
2221}
3010a5ea 2222#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2667f50e 2223
17596731 2224#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
b59f65fa 2225static int __gup_device_huge(unsigned long pfn, unsigned long addr,
86dfbed4
JH
2226 unsigned long end, unsigned int flags,
2227 struct page **pages, int *nr)
b59f65fa
KS
2228{
2229 int nr_start = *nr;
2230 struct dev_pagemap *pgmap = NULL;
6401c4eb 2231 int ret = 1;
b59f65fa
KS
2232
2233 do {
2234 struct page *page = pfn_to_page(pfn);
2235
2236 pgmap = get_dev_pagemap(pfn, pgmap);
2237 if (unlikely(!pgmap)) {
3b78d834 2238 undo_dev_pagemap(nr, nr_start, flags, pages);
6401c4eb
ML
2239 ret = 0;
2240 break;
b59f65fa
KS
2241 }
2242 SetPageReferenced(page);
2243 pages[*nr] = page;
3faa52c0
JH
2244 if (unlikely(!try_grab_page(page, flags))) {
2245 undo_dev_pagemap(nr, nr_start, flags, pages);
6401c4eb
ML
2246 ret = 0;
2247 break;
3faa52c0 2248 }
b59f65fa
KS
2249 (*nr)++;
2250 pfn++;
2251 } while (addr += PAGE_SIZE, addr != end);
832d7aa0 2252
6401c4eb
ML
2253 put_dev_pagemap(pgmap);
2254 return ret;
b59f65fa
KS
2255}
2256
a9b6de77 2257static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2258 unsigned long end, unsigned int flags,
2259 struct page **pages, int *nr)
b59f65fa
KS
2260{
2261 unsigned long fault_pfn;
a9b6de77
DW
2262 int nr_start = *nr;
2263
2264 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
86dfbed4 2265 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2266 return 0;
b59f65fa 2267
a9b6de77 2268 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3b78d834 2269 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2270 return 0;
2271 }
2272 return 1;
b59f65fa
KS
2273}
2274
a9b6de77 2275static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2276 unsigned long end, unsigned int flags,
2277 struct page **pages, int *nr)
b59f65fa
KS
2278{
2279 unsigned long fault_pfn;
a9b6de77
DW
2280 int nr_start = *nr;
2281
2282 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
86dfbed4 2283 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2284 return 0;
b59f65fa 2285
a9b6de77 2286 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3b78d834 2287 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2288 return 0;
2289 }
2290 return 1;
b59f65fa
KS
2291}
2292#else
a9b6de77 2293static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2294 unsigned long end, unsigned int flags,
2295 struct page **pages, int *nr)
b59f65fa
KS
2296{
2297 BUILD_BUG();
2298 return 0;
2299}
2300
a9b6de77 2301static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2302 unsigned long end, unsigned int flags,
2303 struct page **pages, int *nr)
b59f65fa
KS
2304{
2305 BUILD_BUG();
2306 return 0;
2307}
2308#endif
2309
a43e9820
JH
2310static int record_subpages(struct page *page, unsigned long addr,
2311 unsigned long end, struct page **pages)
2312{
2313 int nr;
2314
2315 for (nr = 0; addr != end; addr += PAGE_SIZE)
2316 pages[nr++] = page++;
2317
2318 return nr;
2319}
2320
cbd34da7
CH
2321#ifdef CONFIG_ARCH_HAS_HUGEPD
2322static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2323 unsigned long sz)
2324{
2325 unsigned long __boundary = (addr + sz) & ~(sz-1);
2326 return (__boundary - 1 < end - 1) ? __boundary : end;
2327}
2328
2329static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
0cd22afd
JH
2330 unsigned long end, unsigned int flags,
2331 struct page **pages, int *nr)
cbd34da7
CH
2332{
2333 unsigned long pte_end;
2334 struct page *head, *page;
2335 pte_t pte;
2336 int refs;
2337
2338 pte_end = (addr + sz) & ~(sz-1);
2339 if (pte_end < end)
2340 end = pte_end;
2341
55ca2263 2342 pte = huge_ptep_get(ptep);
cbd34da7 2343
0cd22afd 2344 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
cbd34da7
CH
2345 return 0;
2346
2347 /* hugepages are never "special" */
2348 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2349
cbd34da7 2350 head = pte_page(pte);
cbd34da7 2351 page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
a43e9820 2352 refs = record_subpages(page, addr, end, pages + *nr);
cbd34da7 2353
3faa52c0 2354 head = try_grab_compound_head(head, refs, flags);
a43e9820 2355 if (!head)
cbd34da7 2356 return 0;
cbd34da7
CH
2357
2358 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
3b78d834 2359 put_compound_head(head, refs, flags);
cbd34da7
CH
2360 return 0;
2361 }
2362
a43e9820 2363 *nr += refs;
520b4a44 2364 SetPageReferenced(head);
cbd34da7
CH
2365 return 1;
2366}
2367
2368static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2369 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2370 struct page **pages, int *nr)
2371{
2372 pte_t *ptep;
2373 unsigned long sz = 1UL << hugepd_shift(hugepd);
2374 unsigned long next;
2375
2376 ptep = hugepte_offset(hugepd, addr, pdshift);
2377 do {
2378 next = hugepte_addr_end(addr, end, sz);
0cd22afd 2379 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
cbd34da7
CH
2380 return 0;
2381 } while (ptep++, addr = next, addr != end);
2382
2383 return 1;
2384}
2385#else
2386static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2387 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2388 struct page **pages, int *nr)
2389{
2390 return 0;
2391}
2392#endif /* CONFIG_ARCH_HAS_HUGEPD */
2393
2667f50e 2394static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
0cd22afd
JH
2395 unsigned long end, unsigned int flags,
2396 struct page **pages, int *nr)
2667f50e 2397{
ddc58f27 2398 struct page *head, *page;
2667f50e
SC
2399 int refs;
2400
b798bec4 2401 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2402 return 0;
2403
7af75561
IW
2404 if (pmd_devmap(orig)) {
2405 if (unlikely(flags & FOLL_LONGTERM))
2406 return 0;
86dfbed4
JH
2407 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2408 pages, nr);
7af75561 2409 }
b59f65fa 2410
d63206ee 2411 page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
a43e9820 2412 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2413
3faa52c0 2414 head = try_grab_compound_head(pmd_page(orig), refs, flags);
a43e9820 2415 if (!head)
2667f50e 2416 return 0;
2667f50e
SC
2417
2418 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3b78d834 2419 put_compound_head(head, refs, flags);
2667f50e
SC
2420 return 0;
2421 }
2422
a43e9820 2423 *nr += refs;
e9348053 2424 SetPageReferenced(head);
2667f50e
SC
2425 return 1;
2426}
2427
2428static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2429 unsigned long end, unsigned int flags,
2430 struct page **pages, int *nr)
2667f50e 2431{
ddc58f27 2432 struct page *head, *page;
2667f50e
SC
2433 int refs;
2434
b798bec4 2435 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2436 return 0;
2437
7af75561
IW
2438 if (pud_devmap(orig)) {
2439 if (unlikely(flags & FOLL_LONGTERM))
2440 return 0;
86dfbed4
JH
2441 return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2442 pages, nr);
7af75561 2443 }
b59f65fa 2444
d63206ee 2445 page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
a43e9820 2446 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2447
3faa52c0 2448 head = try_grab_compound_head(pud_page(orig), refs, flags);
a43e9820 2449 if (!head)
2667f50e 2450 return 0;
2667f50e
SC
2451
2452 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3b78d834 2453 put_compound_head(head, refs, flags);
2667f50e
SC
2454 return 0;
2455 }
2456
a43e9820 2457 *nr += refs;
e9348053 2458 SetPageReferenced(head);
2667f50e
SC
2459 return 1;
2460}
2461
f30c59e9 2462static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
b798bec4 2463 unsigned long end, unsigned int flags,
f30c59e9
AK
2464 struct page **pages, int *nr)
2465{
2466 int refs;
ddc58f27 2467 struct page *head, *page;
f30c59e9 2468
b798bec4 2469 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
f30c59e9
AK
2470 return 0;
2471
b59f65fa 2472 BUILD_BUG_ON(pgd_devmap(orig));
a43e9820 2473
d63206ee 2474 page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
a43e9820 2475 refs = record_subpages(page, addr, end, pages + *nr);
f30c59e9 2476
3faa52c0 2477 head = try_grab_compound_head(pgd_page(orig), refs, flags);
a43e9820 2478 if (!head)
f30c59e9 2479 return 0;
f30c59e9
AK
2480
2481 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
3b78d834 2482 put_compound_head(head, refs, flags);
f30c59e9
AK
2483 return 0;
2484 }
2485
a43e9820 2486 *nr += refs;
e9348053 2487 SetPageReferenced(head);
f30c59e9
AK
2488 return 1;
2489}
2490
d3f7b1bb 2491static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
b798bec4 2492 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2493{
2494 unsigned long next;
2495 pmd_t *pmdp;
2496
d3f7b1bb 2497 pmdp = pmd_offset_lockless(pudp, pud, addr);
2667f50e 2498 do {
38c5ce93 2499 pmd_t pmd = READ_ONCE(*pmdp);
2667f50e
SC
2500
2501 next = pmd_addr_end(addr, end);
84c3fc4e 2502 if (!pmd_present(pmd))
2667f50e
SC
2503 return 0;
2504
414fd080
YZ
2505 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2506 pmd_devmap(pmd))) {
2667f50e
SC
2507 /*
2508 * NUMA hinting faults need to be handled in the GUP
2509 * slowpath for accounting purposes and so that they
2510 * can be serialised against THP migration.
2511 */
8a0516ed 2512 if (pmd_protnone(pmd))
2667f50e
SC
2513 return 0;
2514
b798bec4 2515 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2667f50e
SC
2516 pages, nr))
2517 return 0;
2518
f30c59e9
AK
2519 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2520 /*
 2521 * architectures can have a different format for the hugetlbfs
 2522 * pmd than for the THP pmd
2523 */
2524 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
b798bec4 2525 PMD_SHIFT, next, flags, pages, nr))
f30c59e9 2526 return 0;
b798bec4 2527 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2923117b 2528 return 0;
2667f50e
SC
2529 } while (pmdp++, addr = next, addr != end);
2530
2531 return 1;
2532}
2533
d3f7b1bb 2534static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
b798bec4 2535 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2536{
2537 unsigned long next;
2538 pud_t *pudp;
2539
d3f7b1bb 2540 pudp = pud_offset_lockless(p4dp, p4d, addr);
2667f50e 2541 do {
e37c6982 2542 pud_t pud = READ_ONCE(*pudp);
2667f50e
SC
2543
2544 next = pud_addr_end(addr, end);
15494520 2545 if (unlikely(!pud_present(pud)))
2667f50e 2546 return 0;
f30c59e9 2547 if (unlikely(pud_huge(pud))) {
b798bec4 2548 if (!gup_huge_pud(pud, pudp, addr, next, flags,
f30c59e9
AK
2549 pages, nr))
2550 return 0;
2551 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2552 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
b798bec4 2553 PUD_SHIFT, next, flags, pages, nr))
2667f50e 2554 return 0;
d3f7b1bb 2555 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2667f50e
SC
2556 return 0;
2557 } while (pudp++, addr = next, addr != end);
2558
2559 return 1;
2560}
2561
d3f7b1bb 2562static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
b798bec4 2563 unsigned int flags, struct page **pages, int *nr)
c2febafc
KS
2564{
2565 unsigned long next;
2566 p4d_t *p4dp;
2567
d3f7b1bb 2568 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
c2febafc
KS
2569 do {
2570 p4d_t p4d = READ_ONCE(*p4dp);
2571
2572 next = p4d_addr_end(addr, end);
2573 if (p4d_none(p4d))
2574 return 0;
2575 BUILD_BUG_ON(p4d_huge(p4d));
2576 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2577 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
b798bec4 2578 P4D_SHIFT, next, flags, pages, nr))
c2febafc 2579 return 0;
d3f7b1bb 2580 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
c2febafc
KS
2581 return 0;
2582 } while (p4dp++, addr = next, addr != end);
2583
2584 return 1;
2585}
2586
5b65c467 2587static void gup_pgd_range(unsigned long addr, unsigned long end,
b798bec4 2588 unsigned int flags, struct page **pages, int *nr)
5b65c467
KS
2589{
2590 unsigned long next;
2591 pgd_t *pgdp;
2592
2593 pgdp = pgd_offset(current->mm, addr);
2594 do {
2595 pgd_t pgd = READ_ONCE(*pgdp);
2596
2597 next = pgd_addr_end(addr, end);
2598 if (pgd_none(pgd))
2599 return;
2600 if (unlikely(pgd_huge(pgd))) {
b798bec4 2601 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
5b65c467
KS
2602 pages, nr))
2603 return;
2604 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2605 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
b798bec4 2606 PGDIR_SHIFT, next, flags, pages, nr))
5b65c467 2607 return;
d3f7b1bb 2608 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
5b65c467
KS
2609 return;
2610 } while (pgdp++, addr = next, addr != end);
2611}
050a9adc
CH
2612#else
2613static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2614 unsigned int flags, struct page **pages, int *nr)
2615{
2616}
2617#endif /* CONFIG_HAVE_FAST_GUP */
5b65c467
KS
2618
2619#ifndef gup_fast_permitted
2620/*
dadbb612 2621 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
5b65c467
KS
2622 * we need to fall back to the slow version:
2623 */
26f4c328 2624static bool gup_fast_permitted(unsigned long start, unsigned long end)
5b65c467 2625{
26f4c328 2626 return true;
5b65c467
KS
2627}
2628#endif
2629
7af75561
IW
2630static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2631 unsigned int gup_flags, struct page **pages)
2632{
2633 int ret;
2634
2635 /*
2636 * FIXME: FOLL_LONGTERM does not work with
2637 * get_user_pages_unlocked() (see comments in that function)
2638 */
2639 if (gup_flags & FOLL_LONGTERM) {
d8ed45c5 2640 mmap_read_lock(current->mm);
64019a2e 2641 ret = __gup_longterm_locked(current->mm,
7af75561
IW
2642 start, nr_pages,
2643 pages, NULL, gup_flags);
d8ed45c5 2644 mmap_read_unlock(current->mm);
7af75561
IW
2645 } else {
2646 ret = get_user_pages_unlocked(start, nr_pages,
2647 pages, gup_flags);
2648 }
2649
2650 return ret;
2651}
2652
c28b1fc7
JG
2653static unsigned long lockless_pages_from_mm(unsigned long start,
2654 unsigned long end,
2655 unsigned int gup_flags,
2656 struct page **pages)
2657{
2658 unsigned long flags;
2659 int nr_pinned = 0;
57efa1fe 2660 unsigned seq;
c28b1fc7
JG
2661
2662 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2663 !gup_fast_permitted(start, end))
2664 return 0;
2665
57efa1fe
JG
2666 if (gup_flags & FOLL_PIN) {
2667 seq = raw_read_seqcount(&current->mm->write_protect_seq);
2668 if (seq & 1)
2669 return 0;
2670 }
2671
c28b1fc7
JG
2672 /*
2673 * Disable interrupts. The nested form is used, in order to allow full,
2674 * general purpose use of this routine.
2675 *
2676 * With interrupts disabled, we block page table pages from being freed
2677 * from under us. See struct mmu_table_batch comments in
2678 * include/asm-generic/tlb.h for more details.
2679 *
2680 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2681 * that come from THPs splitting.
2682 */
2683 local_irq_save(flags);
2684 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2685 local_irq_restore(flags);
57efa1fe
JG
2686
2687 /*
2688 * When pinning pages for DMA there could be a concurrent write protect
2689 * from fork() via copy_page_range(), in this case always fail fast GUP.
2690 */
2691 if (gup_flags & FOLL_PIN) {
2692 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2693 unpin_user_pages(pages, nr_pinned);
2694 return 0;
2695 }
2696 }
c28b1fc7
JG
2697 return nr_pinned;
2698}
2699
2700static int internal_get_user_pages_fast(unsigned long start,
2701 unsigned long nr_pages,
eddb1c22
JH
2702 unsigned int gup_flags,
2703 struct page **pages)
2667f50e 2704{
c28b1fc7
JG
2705 unsigned long len, end;
2706 unsigned long nr_pinned;
2707 int ret;
2667f50e 2708
f4000fdf 2709 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
376a34ef
JH
2710 FOLL_FORCE | FOLL_PIN | FOLL_GET |
2711 FOLL_FAST_ONLY)))
817be129
CH
2712 return -EINVAL;
2713
a458b76a
AA
2714 if (gup_flags & FOLL_PIN)
2715 mm_set_has_pinned_flag(&current->mm->flags);
008cfe44 2716
f81cd178 2717 if (!(gup_flags & FOLL_FAST_ONLY))
da1c55f1 2718 might_lock_read(&current->mm->mmap_lock);
f81cd178 2719
f455c854 2720 start = untagged_addr(start) & PAGE_MASK;
c28b1fc7
JG
2721 len = nr_pages << PAGE_SHIFT;
2722 if (check_add_overflow(start, len, &end))
c61611f7 2723 return 0;
96d4f267 2724 if (unlikely(!access_ok((void __user *)start, len)))
c61611f7 2725 return -EFAULT;
73e10a61 2726
c28b1fc7
JG
2727 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2728 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2729 return nr_pinned;
2667f50e 2730
c28b1fc7
JG
2731 /* Slow path: try to get the remaining pages with get_user_pages */
2732 start += nr_pinned << PAGE_SHIFT;
2733 pages += nr_pinned;
2734 ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2735 pages);
2736 if (ret < 0) {
2737 /*
2738 * The caller has to unpin the pages we already pinned so
2739 * returning -errno is not an option
2740 */
2741 if (nr_pinned)
2742 return nr_pinned;
2743 return ret;
2667f50e 2744 }
c28b1fc7 2745 return ret + nr_pinned;
2667f50e 2746}
c28b1fc7 2747
dadbb612
SJ
2748/**
2749 * get_user_pages_fast_only() - pin user pages in memory
2750 * @start: starting user address
2751 * @nr_pages: number of pages from start to pin
2752 * @gup_flags: flags modifying pin behaviour
2753 * @pages: array that receives pointers to the pages pinned.
2754 * Should be at least nr_pages long.
2755 *
9e1f0580
JH
2756 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2757 * the regular GUP.
2758 * Note a difference with get_user_pages_fast: this always returns the
2759 * number of pages pinned, 0 if no pages were pinned.
2760 *
2761 * If the architecture does not support this function, simply return with no
2762 * pages pinned.
2763 *
2764 * Careful, careful! COW breaking can go either way, so a non-write
2765 * access can get ambiguous page results. If you call this function without
2766 * 'write' set, you'd better be sure that you're ok with that ambiguity.
2767 */
dadbb612
SJ
2768int get_user_pages_fast_only(unsigned long start, int nr_pages,
2769 unsigned int gup_flags, struct page **pages)
9e1f0580 2770{
376a34ef 2771 int nr_pinned;
9e1f0580
JH
2772 /*
2773 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2774 * because gup fast is always a "pin with a +1 page refcount" request.
376a34ef
JH
2775 *
2776 * FOLL_FAST_ONLY is required in order to match the API description of
2777 * this routine: no fall back to regular ("slow") GUP.
9e1f0580 2778 */
dadbb612 2779 gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
9e1f0580 2780
376a34ef
JH
2781 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2782 pages);
9e1f0580
JH
2783
2784 /*
376a34ef
JH
2785 * As specified in the API description above, this routine is not
2786 * allowed to return negative values. However, the common core
2787 * routine internal_get_user_pages_fast() *can* return -errno.
2788 * Therefore, correct for that here:
9e1f0580 2789 */
376a34ef
JH
2790 if (nr_pinned < 0)
2791 nr_pinned = 0;
9e1f0580
JH
2792
2793 return nr_pinned;
2794}
dadbb612 2795EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
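A hedged sketch of a caller that cannot sleep and therefore uses the "_only" variant documented above: there is no fallback to slow GUP, so a return of 0 just means "retry later from a sleepable context". The helper name is an illustrative assumption:

	#include <linux/mm.h>

	/* Opportunistically grab one page without ever entering slow GUP. */
	static bool example_try_get_one_nowait(unsigned long uaddr,
					       struct page **page)
	{
		return get_user_pages_fast_only(uaddr & PAGE_MASK, 1,
						FOLL_WRITE, page) == 1;
	}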
9e1f0580 2796
eddb1c22
JH
2797/**
2798 * get_user_pages_fast() - pin user pages in memory
3faa52c0
JH
2799 * @start: starting user address
2800 * @nr_pages: number of pages from start to pin
2801 * @gup_flags: flags modifying pin behaviour
2802 * @pages: array that receives pointers to the pages pinned.
2803 * Should be at least nr_pages long.
eddb1c22 2804 *
c1e8d7c6 2805 * Attempt to pin user pages in memory without taking mm->mmap_lock.
eddb1c22
JH
2806 * If not successful, it will fall back to taking the lock and
2807 * calling get_user_pages().
2808 *
2809 * Returns number of pages pinned. This may be fewer than the number requested.
2810 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2811 * -errno.
2812 */
2813int get_user_pages_fast(unsigned long start, int nr_pages,
2814 unsigned int gup_flags, struct page **pages)
2815{
447f3e45 2816 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2817 return -EINVAL;
2818
94202f12
JH
2819 /*
2820 * The caller may or may not have explicitly set FOLL_GET; either way is
2821 * OK. However, internally (within mm/gup.c), gup fast variants must set
2822 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2823 * request.
2824 */
2825 gup_flags |= FOLL_GET;
eddb1c22
JH
2826 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2827}
050a9adc 2828EXPORT_SYMBOL_GPL(get_user_pages_fast);
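A minimal usage sketch of get_user_pages_fast() as documented above; the helper name, read-only flags, and error handling are illustrative, not part of gup.c:

	#include <linux/mm.h>

	/* Pin a small user buffer for reading, then drop the references. */
	static int example_gup_fast(unsigned long uaddr, int nr,
				    struct page **pages)
	{
		int got, i;

		got = get_user_pages_fast(uaddr & PAGE_MASK, nr, 0, pages);
		if (got <= 0)
			return got ? got : -EFAULT;

		/* ... read from the pages, e.g. via kmap_local_page() ... */

		for (i = 0; i < got; i++)
			put_page(pages[i]);	/* FOLL_GET references from gup fast */
		return got;
	}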
eddb1c22
JH
2829
2830/**
2831 * pin_user_pages_fast() - pin user pages in memory without taking locks
2832 *
3faa52c0
JH
2833 * @start: starting user address
2834 * @nr_pages: number of pages from start to pin
2835 * @gup_flags: flags modifying pin behaviour
2836 * @pages: array that receives pointers to the pages pinned.
2837 * Should be at least nr_pages long.
2838 *
2839 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2840 * get_user_pages_fast() for documentation on the function arguments, because
2841 * the arguments here are identical.
2842 *
2843 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 2844 * see Documentation/core-api/pin_user_pages.rst for further details.
eddb1c22
JH
2845 */
2846int pin_user_pages_fast(unsigned long start, int nr_pages,
2847 unsigned int gup_flags, struct page **pages)
2848{
3faa52c0
JH
2849 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2850 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2851 return -EINVAL;
2852
2853 gup_flags |= FOLL_PIN;
2854 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
eddb1c22
JH
2855}
2856EXPORT_SYMBOL_GPL(pin_user_pages_fast);
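A sketch of the DMA-style pinning pattern this API targets, assuming a long-lived device mapping (hence FOLL_LONGTERM); names and the dirty handling are illustrative, and FOLL_PIN pages must be returned via the unpin_user_page*() family rather than put_page():

	#include <linux/mm.h>

	/* Pin a user buffer for device I/O and release it when the I/O is done. */
	static int example_pin_for_dma(unsigned long uaddr, int nr,
				       struct page **pages)
	{
		int pinned;

		pinned = pin_user_pages_fast(uaddr & PAGE_MASK, nr,
					     FOLL_WRITE | FOLL_LONGTERM, pages);
		if (pinned < 0)
			return pinned;

		/* ... map the pages for DMA and let the device write into them ... */

		unpin_user_pages_dirty_lock(pages, pinned, true);
		return 0;
	}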
2857
104acc32 2858/*
dadbb612
SJ
2859 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2860 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
104acc32
JH
2861 *
2862 * The API rules are the same, too: no negative values may be returned.
2863 */
2864int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2865 unsigned int gup_flags, struct page **pages)
2866{
2867 int nr_pinned;
2868
2869 /*
2870 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
2871 * rules require returning 0, rather than -errno:
2872 */
2873 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2874 return 0;
2875 /*
2876 * FOLL_FAST_ONLY is required in order to match the API description of
2877 * this routine: no fall back to regular ("slow") GUP.
2878 */
2879 gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
2880 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2881 pages);
2882 /*
2883 * This routine is not allowed to return negative values. However,
2884 * internal_get_user_pages_fast() *can* return -errno. Therefore,
2885 * correct for that here:
2886 */
2887 if (nr_pinned < 0)
2888 nr_pinned = 0;
2889
2890 return nr_pinned;
2891}
2892EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
2893
eddb1c22 2894/**
64019a2e 2895 * pin_user_pages_remote() - pin pages of a remote process
eddb1c22 2896 *
3faa52c0
JH
2897 * @mm: mm_struct of target mm
2898 * @start: starting user address
2899 * @nr_pages: number of pages from start to pin
2900 * @gup_flags: flags modifying lookup behaviour
2901 * @pages: array that receives pointers to the pages pinned.
2902 * Should be at least nr_pages long. Or NULL, if caller
2903 * only intends to ensure the pages are faulted in.
2904 * @vmas: array of pointers to vmas corresponding to each page.
2905 * Or NULL if the caller does not require them.
2906 * @locked: pointer to lock flag indicating whether lock is held and
2907 * subsequently whether VM_FAULT_RETRY functionality can be
2908 * utilised. Lock must initially be held.
2909 *
2910 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
2911 * get_user_pages_remote() for documentation on the function arguments, because
2912 * the arguments here are identical.
2913 *
2914 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 2915 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22 2916 */
64019a2e 2917long pin_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
2918 unsigned long start, unsigned long nr_pages,
2919 unsigned int gup_flags, struct page **pages,
2920 struct vm_area_struct **vmas, int *locked)
2921{
3faa52c0
JH
2922 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2923 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2924 return -EINVAL;
2925
2926 gup_flags |= FOLL_PIN;
64019a2e 2927 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
3faa52c0 2928 pages, vmas, locked);
eddb1c22
JH
2929}
2930EXPORT_SYMBOL(pin_user_pages_remote);
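A hedged sketch of pinning a page in a foreign mm with the locked-flag convention described above; the helper name and single-page request are illustrative assumptions:

	#include <linux/mm.h>

	/* Pin one page of another process's address space for later DMA. */
	static long example_pin_remote_page(struct mm_struct *mm,
					    unsigned long uaddr,
					    struct page **page)
	{
		int locked = 1;
		long pinned;

		mmap_read_lock(mm);
		pinned = pin_user_pages_remote(mm, uaddr & PAGE_MASK, 1,
					       FOLL_WRITE, page, NULL, &locked);
		if (locked)
			mmap_read_unlock(mm);
		return pinned;		/* release with unpin_user_page() */
	}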
2931
2932/**
2933 * pin_user_pages() - pin user pages in memory for use by other devices
2934 *
3faa52c0
JH
2935 * @start: starting user address
2936 * @nr_pages: number of pages from start to pin
2937 * @gup_flags: flags modifying lookup behaviour
2938 * @pages: array that receives pointers to the pages pinned.
2939 * Should be at least nr_pages long. Or NULL, if caller
2940 * only intends to ensure the pages are faulted in.
2941 * @vmas: array of pointers to vmas corresponding to each page.
2942 * Or NULL if the caller does not require them.
2943 *
2944 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
2945 * FOLL_PIN is set.
2946 *
2947 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 2948 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22
JH
2949 */
2950long pin_user_pages(unsigned long start, unsigned long nr_pages,
2951 unsigned int gup_flags, struct page **pages,
2952 struct vm_area_struct **vmas)
2953{
3faa52c0
JH
2954 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2955 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2956 return -EINVAL;
2957
2958 gup_flags |= FOLL_PIN;
64019a2e 2959 return __gup_longterm_locked(current->mm, start, nr_pages,
3faa52c0 2960 pages, vmas, gup_flags);
eddb1c22
JH
2961}
2962EXPORT_SYMBOL(pin_user_pages);
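And the current-task variant, again as an illustrative sketch (helper name and flags assumed); as with all FOLL_PIN users, the pages come back via unpin_user_pages():

	#include <linux/mm.h>

	/* Pin a user buffer of the current task, use it, then unpin it. */
	static long example_pin_current(unsigned long uaddr, unsigned long nr,
					struct page **pages)
	{
		long pinned;

		mmap_read_lock(current->mm);
		pinned = pin_user_pages(uaddr & PAGE_MASK, nr, FOLL_WRITE,
					pages, NULL);
		mmap_read_unlock(current->mm);
		if (pinned <= 0)
			return pinned;

		/* ... access the pinned pages ... */

		unpin_user_pages(pages, pinned);
		return pinned;
	}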
91429023
JH
2963
2964/*
2965 * pin_user_pages_unlocked() is the FOLL_PIN variant of
2966 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
2967 * FOLL_PIN and rejects FOLL_GET.
2968 */
2969long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2970 struct page **pages, unsigned int gup_flags)
2971{
2972 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2973 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2974 return -EINVAL;
2975
2976 gup_flags |= FOLL_PIN;
2977 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
2978}
2979EXPORT_SYMBOL(pin_user_pages_unlocked);
420c2091
JH
2980
2981/*
2982 * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
2983 * Behavior is the same, except that this one sets FOLL_PIN and rejects
2984 * FOLL_GET.
2985 */
2986long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
2987 unsigned int gup_flags, struct page **pages,
2988 int *locked)
2989{
2990 /*
2991 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2992 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2993 * vmas. As there are no users of this flag in this call we simply
2994 * disallow this option for now.
2995 */
2996 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2997 return -EINVAL;
2998
2999 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3000 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3001 return -EINVAL;
3002
3003 gup_flags |= FOLL_PIN;
64019a2e 3004 return __get_user_pages_locked(current->mm, start, nr_pages,
420c2091
JH
3005 pages, NULL, locked,
3006 gup_flags | FOLL_TOUCH);
3007}
3008EXPORT_SYMBOL(pin_user_pages_locked);