mm/ksm: handle protnone saved writes when making page write protect
author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Fri, 24 Feb 2017 22:59:19 +0000 (14:59 -0800)
committer Tim Gardner <tim.gardner@canonical.com>
Fri, 10 Mar 2017 13:22:05 +0000 (06:22 -0700)
BugLink: http://bugs.launchpad.net/bugs/1671613

Without this, KSM will consider the page write protected, but a NUMA
fault can later mark the page writable.  This can result in memory
corruption.

Link: http://lkml.kernel.org/r/1487498625-10891-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 595cd8f256d24face93b2722927ec9c980419c26)
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
include/asm-generic/pgtable.h
mm/ksm.c
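
Before the diff, for context on the failure mode described in the commit
message: the sketch below models the check this patch adds to
write_protect_page().  A PROT_NONE (NUMA-hinting) PTE that still carries a
saved-write bit must be treated as writable and have that bit cleared, or a
later NUMA fault could restore write access to a page KSM believes is
read-only.  This is a minimal userspace illustration only; the bit layout and
the simplified pte_t/helper definitions are assumptions for the sketch, not
the real arch-specific encoding.

/*
 * Minimal userspace sketch (not kernel code) of the decision KSM makes in
 * write_protect_page() after this patch: a protnone PTE with saved write
 * must be write protected too, and the saved-write bit cleared instead of
 * the (already absent) write bit.  Bit layout below is illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int pte_t;

#define PTE_WRITE      (1u << 0)
#define PTE_DIRTY      (1u << 1)
#define PTE_PROTNONE   (1u << 2)   /* NUMA hinting: accesses will fault */
#define PTE_SAVEDWRITE (1u << 3)   /* write permission saved across protnone */

static bool pte_write(pte_t pte)      { return pte & PTE_WRITE; }
static bool pte_dirty(pte_t pte)      { return pte & PTE_DIRTY; }
static bool pte_protnone(pte_t pte)   { return pte & PTE_PROTNONE; }
static bool pte_savedwrite(pte_t pte) { return pte & PTE_SAVEDWRITE; }

static pte_t pte_wrprotect(pte_t pte)        { return pte & ~PTE_WRITE; }
static pte_t pte_clear_savedwrite(pte_t pte) { return pte & ~PTE_SAVEDWRITE; }

/* Mirrors the extended condition: protnone+savedwrite counts as writable. */
static bool needs_write_protect(pte_t pte)
{
	return pte_write(pte) || pte_dirty(pte) ||
	       (pte_protnone(pte) && pte_savedwrite(pte));
}

/* Mirrors the new branch: clear saved write for protnone, else the write bit. */
static pte_t make_readonly(pte_t pte)
{
	return pte_protnone(pte) ? pte_clear_savedwrite(pte) : pte_wrprotect(pte);
}

int main(void)
{
	pte_t numa_pte = PTE_PROTNONE | PTE_SAVEDWRITE;

	printf("needs protection: %d\n", needs_write_protect(numa_pte));            /* 1 */
	printf("savedwrite after: %d\n", pte_savedwrite(make_readonly(numa_pte)));  /* 0 */
	return 0;
}
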

index b6f3a8a4b73829e9bc62f6fde9b2d14d2e2f506d..8c8ba48bef0ba88e9744ad0bebc511f1726e02b1 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -200,6 +200,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pte_mk_savedwrite pte_mkwrite
 #endif
 
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
 #ifndef pmd_savedwrite
 #define pmd_savedwrite pmd_write
 #endif
@@ -208,6 +212,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pmd_mk_savedwrite pmd_mkwrite
 #endif
 
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
index 9dd2e58fb6dc93184c5a9dba3347a2b06cc57c42..abc05187168a5cebdd2f6d3aca97fbe1c3fc623c 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -880,7 +880,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
        if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
                goto out_unlock;
 
-       if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
+       if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
+           (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
                pte_t entry;
 
                swapped = PageSwapCache(page);
@@ -905,7 +906,11 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                }
                if (pte_dirty(entry))
                        set_page_dirty(page);
-               entry = pte_mkclean(pte_wrprotect(entry));
+
+               if (pte_protnone(entry))
+                       entry = pte_mkclean(pte_clear_savedwrite(entry));
+               else
+                       entry = pte_mkclean(pte_wrprotect(entry));
                set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
        }
        *orig_pte = *pvmw.pte;
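
The asm-generic fallbacks added in include/asm-generic/pgtable.h follow the
kernel's usual override pattern: an architecture that actually tracks saved
writes (powerpc's autonuma handling) defines pte_clear_savedwrite and
pmd_clear_savedwrite itself, while everywhere else the macros degenerate to
pte_wrprotect/pmd_wrprotect, so the new branch in write_protect_page() stays
equivalent to the old code on those architectures.  The snippet below is a
stand-alone illustration of that preprocessor pattern with a dummy pte_t,
assumed here only for the sketch.

/*
 * Stand-alone illustration of the #ifndef fallback pattern above: if no
 * architecture-specific pte_clear_savedwrite is defined, it aliases to
 * pte_wrprotect.  pte_t and the bit layout are dummies for this sketch.
 */
#include <stdio.h>

typedef unsigned int pte_t;

#define PTE_WRITE (1u << 0)

static pte_t pte_wrprotect(pte_t pte)
{
	return pte & ~PTE_WRITE;
}

/* Generic fallback, mirroring the hunk in include/asm-generic/pgtable.h. */
#ifndef pte_clear_savedwrite
#define pte_clear_savedwrite pte_wrprotect
#endif

int main(void)
{
	pte_t pte = PTE_WRITE;

	/* Without a saved-write bit this just drops the write permission. */
	printf("%u\n", pte_clear_savedwrite(pte));	/* prints 0 */
	return 0;
}
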