diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9a6bd6c8d55a6691047e516a46c2cf6b931b912d..87303c6bacf4af675cb8947f91a2225bd3aedd22 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
+       /*
+        * When we COW a devmap PMD entry, we split it into PTEs, so we should
+        * not be in this function with `flags & FOLL_COW` set.
+        */
+       WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                return NULL;
 
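FOLL_COW is how gup remembers that it already broke COW for a mapping:
after a forced write fault succeeds on a VMA that is not VM_WRITE, later
lookups may accept a read-only but dirty entry. A minimal sketch of
where the flag gets set, paraphrased from faultin_page() in mm/gup.c of
this kernel generation (illustrative, not a verbatim quote):

	/* after handle_mm_fault() reports that it broke COW for us */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;

A devmap PMD is split into PTEs when it is COWed, so by the time
FOLL_COW is set the retry can no longer land in follow_devmap_pmd();
hence the WARN_ONCE rather than a functional check.
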
@@ -1128,6 +1134,16 @@ out_unlock:
        return ret;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+       return pmd_write(pmd) ||
+              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
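The new helper mirrors the PTE-level test that FOLL_FORCE/FOLL_COW
handling already uses. For comparison, the counterpart in mm/gup.c of
this era reads roughly as follows (cited from memory, treat as
illustrative):

	static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
	{
		return pte_write(pte) ||
		       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
	}

The dirty test is what keeps FOLL_FORCE honest: a forced writer only
gets past write protection after a COW cycle has produced a private,
dirty copy of the page.
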
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       if (flags & FOLL_WRITE && !pmd_write(*pmd))
+       if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
                goto out;
 
        /* Avoid dumping huge zero page */
@@ -1237,7 +1253,7 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
        }
 
        /* See similar comment in do_numa_page for explanation */
-       if (!pmd_write(pmd))
+       if (!pmd_savedwrite(pmd))
                flags |= TNF_NO_GROUP;
 
        /*
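pmd_savedwrite() reports whether the entry was writable before it was
made PROT_NONE for a NUMA hinting fault. Architectures that do not
stash the write bit fall back to plain pmd_write(); roughly, the
generic definition is (sketch of the asm-generic helper, from memory):

	#ifndef pmd_savedwrite
	#define pmd_savedwrite pmd_write
	#endif

On an architecture such as ppc64 that does stash the bit, testing
pmd_write() directly was wrong here: a NUMA-protected pmd reads as
non-writable even when the mapping itself is writable, so writable
private mappings were spuriously flagged TNF_NO_GROUP.
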
@@ -1300,7 +1316,7 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
        goto out;
 clear_pmdnuma:
        BUG_ON(!PageLocked(page));
-       was_writable = pmd_write(pmd);
+       was_writable = pmd_savedwrite(pmd);
        pmd = pmd_modify(pmd, vma->vm_page_prot);
        pmd = pmd_mkyoung(pmd);
        if (was_writable)
@@ -1317,7 +1333,7 @@ out:
 
        if (page_nid != -1)
                task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
-                               vmf->flags);
+                               flags);
 
        return 0;
 }
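task_numa_fault() takes TNF_* flags, but the old code handed it
vmf->flags, which holds FAULT_FLAG_* bits; the local flags variable is
the one that accumulated TNF_NO_GROUP and friends above. The two
namespaces overlap numerically, which kept the bug silent (values as in
this era's include/linux/sched.h and include/linux/mm.h, cited from
memory):

	#define TNF_MIGRATED	0x01	/* vs. FAULT_FLAG_WRITE   0x01 */
	#define TNF_NO_GROUP	0x02	/* vs. FAULT_FLAG_MKWRITE 0x02 */
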
@@ -1555,7 +1571,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
                        entry = pmd_modify(entry, newprot);
                        if (preserve_write)
-                               entry = pmd_mkwrite(entry);
+                               entry = pmd_mk_savedwrite(entry);
                        ret = HPAGE_PMD_NR;
                        set_pmd_at(mm, addr, pmd, entry);
                        BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
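pmd_mk_savedwrite() is the restore-side counterpart: when
change_huge_pmd() wants to preserve writability across the protection
change, it must store the bit in whatever form the architecture uses
for PROT_NONE entries, rather than setting the real write bit on a
protnone pmd. On architectures without savedwrite support it again
degenerates to the plain helper (sketch, from memory):

	#ifndef pmd_mk_savedwrite
	#define pmd_mk_savedwrite pmd_mkwrite
	#endif
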
@@ -1862,9 +1878,12 @@ static void freeze_page(struct page *page)
 static void unfreeze_page(struct page *page)
 {
        int i;
-
-       for (i = 0; i < HPAGE_PMD_NR; i++)
-               remove_migration_ptes(page + i, page + i, true);
+       if (PageTransHuge(page)) {
+               remove_migration_ptes(page, page, true);
+       } else {
+               for (i = 0; i < HPAGE_PMD_NR; i++)
+                       remove_migration_ptes(page + i, page + i, true);
+       }
 }
 
 static void __split_huge_page_tail(struct page *head, int tail,
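
The unfreeze_page() change above exploits the fact that when a split
failed, the page is still a compound THP, so a single rmap walk over
the head page can find and restore every migration entry covering it;
the per-subpage loop is only needed once the page really has been split
into independent pages. On the failure path this turns HPAGE_PMD_NR
walks (512 on x86-64) into one.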