userfaultfd: shmem: allocate anonymous memory for MAP_PRIVATE shmem
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 81192701964d36d609d015ce76cf14dbbb1a0dd9..cb82e50becf7a1e01f8ffec31ff4f451eb5e558b 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -49,7 +49,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 
                /* fallback to copy_from_user outside mmap_sem */
                if (unlikely(ret)) {
-                       ret = -EFAULT;
+                       ret = -ENOENT;
                        *pagep = page;
                        /* don't free the page */
                        goto out;
@@ -275,7 +275,7 @@ retry:
 
                cond_resched();
 
-               if (unlikely(err == -EFAULT)) {
+               if (unlikely(err == -ENOENT)) {
                        up_read(&dst_mm->mmap_sem);
                        BUG_ON(!page);
 
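The first two hunks change the sentinel, not the logic: -EFAULT is also a legitimate final error for a UFFDIO_COPY, while the value returned here is purely internal and means "the atomic copy faulted while mmap_sem was held; redo the copy outside the lock and retry". Reusing -EFAULT for both made the two cases ambiguous, so -ENOENT now carries the retry signal. Below is a minimal userspace analogue of that protocol; every name in it is invented for illustration, with a mutex standing in for mmap_sem.

/*
 * Userspace analogue of the retry protocol, for illustration only:
 * the mutex stands in for mmap_sem, fill_page_atomic() for
 * mcopy_atomic_pte(), and -ENOENT is the internal "redo the copy
 * outside the lock" sentinel this patch switches to.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Must not sleep: fail with -ENOENT if the payload isn't staged yet. */
static int fill_page_atomic(char *dst, const char *staged)
{
	if (!staged)
		return -ENOENT;	/* not a fault: caller retries outside the lock */
	memcpy(dst, staged, 64);
	return 0;
}

static int fill_page(char *dst, const char *src)
{
	const char *staged = NULL;
	int err;

retry:
	pthread_mutex_lock(&map_lock);
	err = fill_page_atomic(dst, staged);
	pthread_mutex_unlock(&map_lock);

	if (err == -ENOENT) {
		staged = src;	/* the copy-outside-the-lock step */
		goto retry;
	}
	return err;		/* 0 on success, or a real error like -EFAULT */
}

int main(void)
{
	char dst[64] = "", src[64] = "payload";
	printf("err=%d dst=%s\n", fill_page(dst, src), dst);
	return 0;
}

The hugetlb loop in the second hunk follows exactly this shape: up_read() the semaphore, perform the copy that could not be done atomically, and jump back to retry.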
@@ -381,7 +381,17 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 {
        ssize_t err;
 
-       if (vma_is_anonymous(dst_vma)) {
+       /*
+        * The normal page fault path for a shmem will invoke the
+        * fault, fill the hole in the file and COW it right away. The
+        * result is plain anonymous memory. So when we are
+        * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
+        * generate anonymous memory directly without actually filling
+        * the hole. For the MAP_PRIVATE case the robustness check
+        * only happens in the pagetable (to verify it's still none)
+        * and not in the radix tree.
+        */
+       if (!(dst_vma->vm_flags & VM_SHARED)) {
                if (!zeropage)
                        err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
                                               dst_addr, src_addr, page);
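This hunk is the substance of the patch: the anonymous-memory path is now taken for every non-VM_SHARED destination VMA, so a UFFDIO_COPY into a MAP_PRIVATE shmem mapping installs an anonymous page directly instead of filling the hole in the underlying file first. The following sketch shows the userspace behaviour this enables; it assumes memfd_create() (glibc 2.27+) and a kernel with userfaultfd shmem support, with error handling elided for brevity.

/*
 * Sketch: resolve a missing-page fault in a MAP_PRIVATE shmem
 * mapping with UFFDIO_COPY -- the case this patch wires up to the
 * anonymous-memory path. Illustration only, not a reference
 * implementation.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned long page_size;

static void *resolver(void *arg)
{
	int uffd = (int)(long)arg;
	static char payload[1 << 16];		/* >= one page on any arch */
	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
	struct uffd_msg msg;
	struct uffdio_copy copy;

	memset(payload, 'x', sizeof(payload));
	poll(&pfd, 1, -1);
	read(uffd, &msg, sizeof(msg));		/* one MISSING event expected */

	copy.dst = msg.arg.pagefault.address & ~(page_size - 1);
	copy.src = (unsigned long)payload;
	copy.len = page_size;
	copy.mode = 0;
	/* With this patch, the copy lands as plain anonymous memory. */
	ioctl(uffd, UFFDIO_COPY, &copy);
	return NULL;
}

int main(void)
{
	page_size = sysconf(_SC_PAGE_SIZE);

	int memfd = memfd_create("uffd-private-shmem", 0);
	ftruncate(memfd, page_size);

	/* MAP_PRIVATE shmem: the case the new !VM_SHARED check covers */
	char *area = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE, memfd, 0);

	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };
	ioctl(uffd, UFFDIO_API, &api);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = page_size },
		.mode = UFFDIO_REGISTER_MODE_MISSING,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	pthread_t thr;
	pthread_create(&thr, NULL, resolver, (void *)(long)uffd);

	printf("first byte after fault: %c\n", area[0]);  /* faults here */
	pthread_join(thr, NULL);
	return 0;
}

As the new comment above explains, the same UFFDIO_COPY previously went through the shmem path and populated the hole in the file itself; with this change the file stays untouched, matching what the regular COW fault path would produce for a private mapping.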
@@ -480,7 +490,8 @@ retry:
         * dst_vma.
         */
        err = -ENOMEM;
-       if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma)))
+       if (!(dst_vma->vm_flags & VM_SHARED) &&
+           unlikely(anon_vma_prepare(dst_vma)))
                goto out_unlock;
 
        while (src_addr < src_start + len) {
@@ -521,7 +532,7 @@ retry:
                                       src_addr, &page, zeropage);
                cond_resched();
 
-               if (unlikely(err == -EFAULT)) {
+               if (unlikely(err == -ENOENT)) {
                        void *page_kaddr;
 
                        up_read(&dst_mm->mmap_sem);
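(This last hunk is cut off by the blob view; in the surrounding function the branch continues just like the hugetlb one above: kmap the page, redo the copy_from_user() now that mmap_sem has been dropped, and goto retry, with a failure of that second copy only then becoming the caller-visible -EFAULT.)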