]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blobdiff - mm/gup.c
UBUNTU: Ubuntu-4.15.0-96.97
[mirror_ubuntu-bionic-kernel.git] / mm / gup.c
index e0d82b6706d72d82637bca5eaef1e35e15a1abdf..12b9626b1a9ed181991ffd2c8d4ead8db7e120e4 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -153,7 +153,10 @@ retry:
        }
 
        if (flags & FOLL_GET) {
-               get_page(page);
+               if (unlikely(!try_get_page(page))) {
+                       page = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
 
                /* drop the pgmap reference now that we hold the page */
                if (pgmap) {
@@ -280,7 +283,10 @@ retry_locked:
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
                } else {
-                       get_page(page);
+                       if (unlikely(!try_get_page(page))) {
+                               spin_unlock(ptl);
+                               return ERR_PTR(-ENOMEM);
+                       }
                        spin_unlock(ptl);
                        lock_page(page);
                        ret = split_huge_page(page);
@@ -436,11 +442,14 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
                pgd = pgd_offset_k(address);
        else
                pgd = pgd_offset_gate(mm, address);
-       BUG_ON(pgd_none(*pgd));
+       if (pgd_none(*pgd))
+               return -EFAULT;
        p4d = p4d_offset(pgd, address);
-       BUG_ON(p4d_none(*p4d));
+       if (p4d_none(*p4d))
+               return -EFAULT;
        pud = pud_offset(p4d, address);
-       BUG_ON(pud_none(*pud));
+       if (pud_none(*pud))
+               return -EFAULT;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return -EFAULT;
@@ -464,7 +473,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
                if (is_device_public_page(*page))
                        goto unmap;
        }
-       get_page(*page);
+       if (unlikely(!try_get_page(*page))) {
+               ret = -ENOMEM;
+               goto unmap;
+       }
 out:
        ret = 0;
 unmap:
@@ -544,6 +556,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
        if (vm_flags & (VM_IO | VM_PFNMAP))
                return -EFAULT;
 
+       if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+               return -EFAULT;
+
        if (write) {
                if (!(vm_flags & VM_WRITE)) {
                        if (!(gup_flags & FOLL_FORCE))
@@ -1232,8 +1247,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
        int locked = 0;
        long ret = 0;
 
-       VM_BUG_ON(start & ~PAGE_MASK);
-       VM_BUG_ON(len != PAGE_ALIGN(len));
        end = start + len;
 
        for (nstart = start; nstart < end; nstart = nend) {
@@ -1354,7 +1367,8 @@ static inline pte_t gup_get_pte(pte_t *ptep)
 }
 #endif
 
-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
+static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
+                                           struct page **pages)
 {
        while ((*nr) - nr_start) {
                struct page *page = pages[--(*nr)];
@@ -1364,6 +1378,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
        }
 }
 
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+       struct page *head = compound_head(page);
+       if (WARN_ON_ONCE(page_ref_count(head) < 0))
+               return NULL;
+       if (unlikely(!page_cache_add_speculative(head, refs)))
+               return NULL;
+       return head;
+}
+
 #ifdef __HAVE_ARCH_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                         int write, struct page **pages, int *nr)
@@ -1398,9 +1426,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
-               head = compound_head(page);
 
-               if (!page_cache_get_speculative(head))
+               head = try_get_compound_head(page, 1);
+               if (!head)
                        goto pte_unmap;
 
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1466,32 +1494,48 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
        return 1;
 }
 
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                unsigned long end, struct page **pages, int *nr)
 {
        unsigned long fault_pfn;
+       int nr_start = *nr;
 
-       fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-       return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+       fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+       if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
+               return 0;
+
+       if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
+               undo_dev_pagemap(nr, nr_start, pages);
+               return 0;
+       }
+       return 1;
 }
 
-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
+static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                unsigned long end, struct page **pages, int *nr)
 {
        unsigned long fault_pfn;
+       int nr_start = *nr;
 
-       fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-       return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+       fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+       if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
+               return 0;
+
+       if (unlikely(pud_val(orig) != pud_val(*pudp))) {
+               undo_dev_pagemap(nr, nr_start, pages);
+               return 0;
+       }
+       return 1;
 }
 #else
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                unsigned long end, struct page **pages, int *nr)
 {
        BUILD_BUG();
        return 0;
 }
 
-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
+static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
                unsigned long end, struct page **pages, int *nr)
 {
        BUILD_BUG();
@@ -1509,7 +1553,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                return 0;
 
        if (pmd_devmap(orig))
-               return __gup_device_huge_pmd(orig, addr, end, pages, nr);
+               return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
 
        refs = 0;
        page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
@@ -1520,8 +1564,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pmd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pmd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1547,7 +1591,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                return 0;
 
        if (pud_devmap(orig))
-               return __gup_device_huge_pud(orig, addr, end, pages, nr);
+               return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
 
        refs = 0;
        page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
@@ -1558,8 +1602,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pud_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pud_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1595,8 +1639,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pgd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pgd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1626,7 +1670,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                if (!pmd_present(pmd))
                        return 0;
 
-               if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
+               if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
+                            pmd_devmap(pmd))) {
                        /*
                         * NUMA hinting faults need to be handled in the GUP
                         * slowpath for accounting purposes and so that they
@@ -1816,9 +1861,12 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
 
+       if (nr_pages <= 0)
+               return 0;
+
        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                                        (void __user *)start, len)))
-               return 0;
+               return -EFAULT;
 
        if (gup_fast_permitted(start, nr_pages, write)) {
                local_irq_disable();