Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

diff --git a/mm/memory.c b/mm/memory.c
index 7d23b505024846301d0e1bb77783489455c8c742..6bf2b471e30ca566a55160e4131bf7e7b9c3c4ea 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3008,13 +3008,6 @@ static int do_set_pmd(struct vm_fault *vmf, struct page *page)
        ret = 0;
        count_vm_event(THP_FILE_MAPPED);
 out:
-       /*
-        * If we are going to fallback to pte mapping, do a
-        * withdraw with pmd lock held.
-        */
-       if (arch_needs_pgtable_deposit() && ret == VM_FAULT_FALLBACK)
-               vmf->prealloc_pte = pgtable_trans_huge_withdraw(vma->vm_mm,
-                                                               vmf->pmd);
        spin_unlock(vmf->ptl);
        return ret;
 }
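The hunk above removes the fallback-path withdraw from do_set_pmd(): when the huge mapping cannot be installed, the preallocated page table is no longer pulled back under the pmd lock here, because a later hunk makes do_fault() free any leftover vmf->prealloc_pte in one place. For context, a minimal sketch (assumed from the surrounding kernel code of this era, not part of this patch) of the deposit side that the deleted withdraw used to mirror:

	/* On architectures where arch_needs_pgtable_deposit() is true, a
	 * page table must sit behind every huge pmd, so do_set_pmd() hands
	 * the preallocated pte page to the pmd before mapping it: */
	if (arch_needs_pgtable_deposit()) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
		vmf->prealloc_pte = NULL;	/* now owned by the pmd */
	}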
@@ -3055,20 +3048,18 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 
                ret = do_set_pmd(vmf, page);
                if (ret != VM_FAULT_FALLBACK)
-                       goto fault_handled;
+                       return ret;
        }
 
        if (!vmf->pte) {
                ret = pte_alloc_one_map(vmf);
                if (ret)
-                       goto fault_handled;
+                       return ret;
        }
 
        /* Re-check under ptl */
-       if (unlikely(!pte_none(*vmf->pte))) {
-               ret = VM_FAULT_NOPAGE;
-               goto fault_handled;
-       }
+       if (unlikely(!pte_none(*vmf->pte)))
+               return VM_FAULT_NOPAGE;
 
        flush_icache_page(vma, page);
        entry = mk_pte(page, vma->vm_page_prot);
@@ -3088,15 +3079,8 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 
        /* no need to invalidate: a not-present page won't be cached */
        update_mmu_cache(vma, vmf->address, vmf->pte);
-       ret = 0;
 
-fault_handled:
-       /* preallocated pagetable is unused: free it */
-       if (vmf->prealloc_pte) {
-               pte_free(vmf->vma->vm_mm, vmf->prealloc_pte);
-               vmf->prealloc_pte = 0;
-       }
-       return ret;
+       return 0;
 }
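With the two hunks above, alloc_set_pte() no longer owns vmf->prealloc_pte: the fault_handled label and its pte_free() are gone, and every exit path is a plain return. A caller-side sketch of the resulting contract (the names are from this file, but the flow is an assumption inferred from the hunks in this diff):

	/* alloc_set_pte() may return with vmf->prealloc_pte still set;
	 * the caller must not free it here -- do_fault(), in the next
	 * hunk, frees it exactly once after the handlers return. */
	ret = alloc_set_pte(vmf, memcg, page);
	if (vmf->pte)
		pte_unmap_unlock(vmf->pte, vmf->ptl);
	return ret;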
 
 
@@ -3360,15 +3344,24 @@ static int do_shared_fault(struct vm_fault *vmf)
 static int do_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
+       int ret;
 
        /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
        if (!vma->vm_ops->fault)
-               return VM_FAULT_SIGBUS;
-       if (!(vmf->flags & FAULT_FLAG_WRITE))
-               return do_read_fault(vmf);
-       if (!(vma->vm_flags & VM_SHARED))
-               return do_cow_fault(vmf);
-       return do_shared_fault(vmf);
+               ret = VM_FAULT_SIGBUS;
+       else if (!(vmf->flags & FAULT_FLAG_WRITE))
+               ret = do_read_fault(vmf);
+       else if (!(vma->vm_flags & VM_SHARED))
+               ret = do_cow_fault(vmf);
+       else
+               ret = do_shared_fault(vmf);
+
+       /* preallocated pagetable is unused: free it */
+       if (vmf->prealloc_pte) {
+               pte_free(vma->vm_mm, vmf->prealloc_pte);
+               vmf->prealloc_pte = 0;
+       }
+       return ret;
 }
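This hunk is where the ownership moves: whichever handler runs, do_fault() now frees the leftover preallocated page table exactly once on the way out. An assumed sketch of the prealloc_pte lifetime after this patch (the pte_alloc_one() call site is illustrative, not shown in this diff):

	/* Assumed lifetime of vmf->prealloc_pte:
	 *  1. allocated up front, e.g.
	 *       vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
	 *  2. consumed by pte_alloc_one_map(), or deposited by do_set_pmd()
	 *     on arch_needs_pgtable_deposit() architectures;
	 *  3. anything left over is freed once, above, by do_fault(). */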
 
 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
@@ -3779,8 +3772,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-static int __follow_pte(struct mm_struct *mm, unsigned long address,
-               pte_t **ptepp, spinlock_t **ptlp)
+static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+               pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -3797,11 +3790,20 @@ static int __follow_pte(struct mm_struct *mm, unsigned long address,
 
        pmd = pmd_offset(pud, address);
        VM_BUG_ON(pmd_trans_huge(*pmd));
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto out;
 
-       /* We cannot handle huge page PFN maps. Luckily they don't exist. */
-       if (pmd_huge(*pmd))
+       if (pmd_huge(*pmd)) {
+               if (!pmdpp)
+                       goto out;
+
+               *ptlp = pmd_lock(mm, pmd);
+               if (pmd_huge(*pmd)) {
+                       *pmdpp = pmd;
+                       return 0;
+               }
+               spin_unlock(*ptlp);
+       }
+
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                goto out;
 
        ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
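The pmd_huge() block added above follows the usual check/lock/recheck idiom: the first test runs without the lock and can race with a split or zap, so it is repeated under pmd_lock() before the pmd is handed to the caller with *ptlp held. A minimal sketch of the same idiom (restating the code above, not new behavior):

	if (pmd_huge(*pmd)) {			/* racy, unlocked hint */
		spinlock_t *ptl = pmd_lock(mm, pmd);
		if (pmd_huge(*pmd)) {		/* confirmed under the lock */
			/* hand pmd to the caller, ptl still held */
		} else {
			spin_unlock(ptl);	/* lost the race: fall back to ptes */
		}
	}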
@@ -3817,16 +3819,30 @@ out:
        return -EINVAL;
 }
 
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-              spinlock_t **ptlp)
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, spinlock_t **ptlp)
+{
+       int res;
+
+       /* (void) is needed to make gcc happy */
+       (void) __cond_lock(*ptlp,
+                          !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
+                                          ptlp)));
+       return res;
+}
+
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
        int res;
 
        /* (void) is needed to make gcc happy */
        (void) __cond_lock(*ptlp,
-                          !(res = __follow_pte(mm, address, ptepp, ptlp)));
+                          !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
+                                          ptlp)));
        return res;
 }
+EXPORT_SYMBOL(follow_pte_pmd);
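follow_pte() keeps its old behavior as a static wrapper passing a NULL pmdpp, while the new exported follow_pte_pmd() lets modular callers receive either a pte or a huge pmd. A hypothetical caller sketch (assumed usage, not from this patch) showing the two unlock paths implied by __follow_pte_pmd() above:

	pte_t *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	if (!follow_pte_pmd(mm, address, &ptep, &pmdp, &ptl)) {
		if (pmdp) {
			/* huge pmd mapping: ptl was taken via pmd_lock() */
			/* ... inspect *pmdp ... */
			spin_unlock(ptl);
		} else {
			/* regular pte: ptep is mapped and ptl is held */
			/* ... inspect *ptep ... */
			pte_unmap_unlock(ptep, ptl);
		}
	}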
 
 /**
  * follow_pfn - look up PFN at a user virtual address