// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
							    unsigned long dst_start,
							    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}
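
/*
 * Illustrative userspace sketch (an assumption about typical usage, not
 * part of this file): the vm_userfaultfd_ctx.ctx checked above only
 * becomes non-NULL once userspace registers the range, e.g.:
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = dst_start, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *		err(1, "UFFDIO_REGISTER");
 *
 * UFFDIO_REGISTER is also where the VM_MAYWRITE check referenced in the
 * comment above is performed; uffd is a userfaultfd file descriptor
 * created via syscall(__NR_userfaultfd, ...).
 */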
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;
	pgoff_t offset, max_off;
	struct inode *inode;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
	if (dst_vma->vm_flags & VM_WRITE) {
		if (wp_copy)
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_release_uncharge_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}
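
/*
 * Illustrative userspace sketch (an assumption about typical usage, not
 * part of this file): a fault handler resolves a missing page by
 * copying from a local buffer with UFFDIO_COPY, which for anonymous
 * vmas lands in mcopy_atomic_pte() above:
 *
 *	struct uffdio_copy copy = {
 *		.dst  = fault_addr & ~(page_size - 1),
 *		.src  = (unsigned long)local_buf,
 *		.len  = page_size,
 *		.mode = 0,			// or UFFDIO_COPY_MODE_WP
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
 *		err(1, "UFFDIO_COPY");
 *
 * local_buf, fault_addr and page_size are hypothetical names for the
 * source buffer, the faulting address (read from the uffd_msg) and the
 * system page size.
 */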
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}
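
/*
 * Illustrative userspace sketch (an assumption about typical usage, not
 * part of this file): UFFDIO_ZEROPAGE resolves a missing-page fault
 * with zeroes instead of copied data and, for anonymous vmas, ends up
 * in mfill_zeropage_pte() above:
 *
 *	struct uffdio_zeropage zp = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len   = page_size },
 *	};
 *	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
 *		err(1, "UFFDIO_ZEROPAGE");
 *
 * fault_addr and page_size are hypothetical names, as in the
 * UFFDIO_COPY sketch above.
 */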
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we don't check whether the pmd is already
	 * established: *pmd may already be populated, and it may even
	 * be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}
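
/*
 * For reference (a restatement, not new behavior): the walk above runs
 * top-down through the five-level layout -- pgd -> p4d -> pud -> pmd.
 * On architectures built with fewer paging levels the intermediate
 * p4d/pud helpers fold into no-ops, so the same code serves 3-, 4- and
 * 5-level configurations.
 */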
#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_sem held, it will release mmap_sem before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes
	 * supported by hugetlb.  A PMD_SIZE huge page may exist, as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (zeropage) {
		up_read(&dst_mm->mmap_sem);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
						dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);
		vm_alloc_shared = vm_shared;

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			down_read(&dst_mm->mmap_sem);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings.  See the routine
		 * restore_reserve_on_error for details.  Unfortunately, we
		 * can not call restore_reserve_on_error now as it would
		 * require holding mmap_sem.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation.  If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations.  When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map.  In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed.  This is the desired
		 * behavior.  However, when a huge page is allocated for an
		 * address without a reservation a reservation entry is added
		 * to the reservation map, and PagePrivate will not be set.
		 * When the page is freed, the global reserve count will NOT
		 * be incremented and it will appear as though we have leaked
		 * a reserved page.  In this case, set PagePrivate so that the
		 * global reserve count will be incremented to match the
		 * reservation map entry which was created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated.  dst_vma could
		 * be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */
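
/*
 * Summary of constraints enforced by the hugetlb path above (restating
 * the code, not new behavior): dst and len must be aligned to the vma's
 * huge page size, the zeropage variant is rejected with -EINVAL since no
 * universal zero huge page exists, and a -ENOENT from
 * hugetlb_mcopy_atomic_pte() makes the routine drop mmap_sem, perform
 * the copy with copy_huge_page_from_user(..., allow_pagefault=true),
 * then retake mmap_sem and revalidate dst_vma before retrying.
 */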
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage,
						bool wp_copy)
{
	ssize_t err;

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		VM_WARN_ON_ONCE(wp_copy);
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage,
					      bool *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
						src_start, len, zeropage);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, zeropage, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
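
/*
 * Illustrative userspace retry loop (an assumption about typical usage,
 * not part of this file): when __mcopy_atomic() stops early -- e.g.
 * -EAGAIN because *mmap_changing was set -- the UFFDIO_COPY ioctl
 * reports the bytes copied so far in uffdio_copy.copy, and the caller
 * is expected to resume from that offset:
 *
 *	struct uffdio_copy copy = { .dst = dst, .src = src, .len = len };
 *	while (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
 *		if (errno != EAGAIN)
 *			err(1, "UFFDIO_COPY");
 *		if (copy.copy > 0) {		// partial progress
 *			copy.dst += copy.copy;
 *			copy.src += copy.copy;
 *			copy.len -= copy.copy;
 *		}
 *	}
 */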
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     bool *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
			      mmap_changing, mode);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
}
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp, bool *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	down_read(&dst_mm->mmap_sem);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);

	err = 0;
out_unlock:
	up_read(&dst_mm->mmap_sem);
	return err;
}
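
/*
 * Illustrative userspace sketch (an assumption about typical usage, not
 * part of this file): mwriteprotect_range() backs UFFDIO_WRITEPROTECT;
 * the same ioctl both arms write protection and resolves it, depending
 * on the mode bit:
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = start, .len = len },
 *		.mode  = UFFDIO_WRITEPROTECT_MODE_WP,	// clear to resolve
 *	};
 *	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) == -1)
 *		err(1, "UFFDIO_WRITEPROTECT");
 */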