mm: provide helper for finishing mkwrite faults
author Jan Kara <jack@suse.cz>
Wed, 14 Dec 2016 23:07:39 +0000 (15:07 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 15 Dec 2016 00:04:09 +0000 (16:04 -0800)
Provide a helper function for finishing write faults due to PTE being
read-only.  The helper will be used by DAX to avoid the need to
complicate generic MM code with DAX locking specifics.
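
To make the calling convention concrete, a minimal sketch of how a
filesystem write-fault path could use the new helper follows.  Here
fs_prepare_entry() and fs_release_entry() are hypothetical placeholders
for whatever protection the caller holds against concurrent faults and
writeback (page lock, DAX radix tree entry lock, ...); they are not
existing kernel functions and this is not part of the patch.

    /*
     * Illustrative sketch only.  fs_prepare_entry()/fs_release_entry()
     * are hypothetical stand-ins for the caller's own synchronization
     * against concurrent faults and writeback.
     */
    static int example_finish_wp_fault(struct vm_fault *vmf)
    {
            int ret;

            /* Keep the backing entry stable while the PTE is modified. */
            fs_prepare_entry(vmf);

            /*
             * finish_mkwrite_fault() re-takes the PTE lock, revalidates
             * the PTE against vmf->orig_pte and, if it is unchanged,
             * marks it writeable via wp_page_reuse().  A return of 0
             * means another fault changed the PTE first; the caller just
             * returns and the access is retried.
             */
            ret = finish_mkwrite_fault(vmf);

            fs_release_entry(vmf);
            return ret;
    }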

Link: http://lkml.kernel.org/r/1479460644-25076-16-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/memory.c

include/linux/mm.h
index 59a4da1742e5e0ad63721ad250ec02154119241f..cec967e93f959a448dba04c454c230b1fa7a64f0 100644
@@ -615,6 +615,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
                struct page *page);
 int finish_fault(struct vm_fault *vmf);
+int finish_mkwrite_fault(struct vm_fault *vmf);
 #endif
 
 /*
mm/memory.c
index 82e7689e3059ec757b4b4554a9bef34ae5cbaa72..bbc25da48a184c9186b9eb5bad1a6418165bcab7 100644
@@ -2269,6 +2269,38 @@ oom:
        return VM_FAULT_OOM;
 }
 
+/**
+ * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
+ *                       writeable once the page is prepared
+ *
+ * @vmf: structure describing the fault
+ *
+ * This function handles all that is needed to finish a write page fault in a
+ * shared mapping due to PTE being read-only once the mapped page is prepared.
+ * It handles locking of PTE and modifying it. The function returns
+ * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE
+ * lock.
+ *
+ * The function expects the page to be locked or other protection against
+ * concurrent faults / writeback (such as DAX radix tree locks).
+ */
+int finish_mkwrite_fault(struct vm_fault *vmf)
+{
+       WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
+       vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
+                                      &vmf->ptl);
+       /*
+        * We might have raced with another page fault while we released the
+        * pte_offset_map_lock.
+        */
+       if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+               pte_unmap_unlock(vmf->pte, vmf->ptl);
+               return 0;
+       }
+       wp_page_reuse(vmf);
+       return VM_FAULT_WRITE;
+}
+
 /*
  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
  * mapping
@@ -2285,16 +2317,7 @@ static int wp_pfn_shared(struct vm_fault *vmf)
                ret = vma->vm_ops->pfn_mkwrite(vma, vmf);
                if (ret & VM_FAULT_ERROR)
                        return ret;
-               vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-                               vmf->address, &vmf->ptl);
-               /*
-                * We might have raced with another page fault while we
-                * released the pte_offset_map_lock.
-                */
-               if (!pte_same(*vmf->pte, vmf->orig_pte)) {
-                       pte_unmap_unlock(vmf->pte, vmf->ptl);
-                       return 0;
-               }
+               return finish_mkwrite_fault(vmf);
        }
        wp_page_reuse(vmf);
        return VM_FAULT_WRITE;
@@ -2304,7 +2327,6 @@ static int wp_page_shared(struct vm_fault *vmf)
        __releases(vmf->ptl)
 {
        struct vm_area_struct *vma = vmf->vma;
-       int page_mkwrite = 0;
 
        get_page(vmf->page);
 
@@ -2318,26 +2340,17 @@ static int wp_page_shared(struct vm_fault *vmf)
                        put_page(vmf->page);
                        return tmp;
                }
-               /*
-                * Since we dropped the lock we need to revalidate
-                * the PTE as someone else may have changed it.  If
-                * they did, we just return, as we can count on the
-                * MMU to tell us if they didn't also make it writable.
-                */
-               vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-                                               vmf->address, &vmf->ptl);
-               if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+               tmp = finish_mkwrite_fault(vmf);
+               if (unlikely(!tmp || (tmp &
+                                     (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
                        unlock_page(vmf->page);
-                       pte_unmap_unlock(vmf->pte, vmf->ptl);
                        put_page(vmf->page);
-                       return 0;
+                       return tmp;
                }
-               page_mkwrite = 1;
-       }
-
-       wp_page_reuse(vmf);
-       if (!page_mkwrite)
+       } else {
+               wp_page_reuse(vmf);
                lock_page(vmf->page);
+       }
        fault_dirty_shared_page(vma, vmf->page);
        put_page(vmf->page);