git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
Revert "mm, ksm: convert write_protect_page() to use page_vma_mapped_walk()"
author    Seth Forshee <seth.forshee@canonical.com>
          Fri, 5 May 2017 15:32:37 +0000 (10:32 -0500)
committer Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
          Wed, 17 May 2017 16:40:32 +0000 (13:40 -0300)
BugLink: http://bugs.launchpad.net/bugs/1674838
This reverts commit 3000e033152a70fa139765b4dbb5baec46b1cc1b.

Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
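
For context, the diff below swaps write_protect_page() from the
page_vma_mapped_walk() iterator back to the older page_check_address()
helper. A minimal sketch of the two PTE-lookup patterns, simplified from
mm/ksm.c and assuming the 4.10-era mm APIs (kernel-context sketch, not a
standalone program):

	/* Walk-based lookup (removed by this revert): on success the
	 * walk has mapped the PTE for `page` in `vma` into pvmw.pte and
	 * holds the page-table lock pvmw.ptl until ..._done() runs. */
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
	};
	pvmw.address = page_address_in_vma(page, vma);
	if (page_vma_mapped_walk(&pvmw)) {
		pte_t entry = *pvmw.pte;		/* read under pvmw.ptl */
		page_vma_mapped_walk_done(&pvmw);	/* unmap + unlock */
	}

	/* Address-based lookup (restored by this revert): returns a
	 * mapped, locked PTE or NULL, passing the lock back via *ptl. */
	spinlock_t *ptl;
	pte_t *ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (ptep) {
		pte_t entry = *ptep;		/* read under *ptl */
		pte_unmap_unlock(ptep, ptl);	/* unmap + unlock */
	}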
mm/ksm.c

diff --git a/mm/ksm.c b/mm/ksm.c
index 9dd2e58fb6dc93184c5a9dba3347a2b06cc57c42..fed4afd8293bdb5096c6f790a59f3a1e37e87497 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -856,35 +856,33 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                              pte_t *orig_pte)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-       };
+       unsigned long addr;
+       pte_t *ptep;
+       spinlock_t *ptl;
        int swapped;
        int err = -EFAULT;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
 
-       pvmw.address = page_address_in_vma(page, vma);
-       if (pvmw.address == -EFAULT)
+       addr = page_address_in_vma(page, vma);
+       if (addr == -EFAULT)
                goto out;
 
        BUG_ON(PageTransCompound(page));
 
-       mmun_start = pvmw.address;
-       mmun_end   = pvmw.address + PAGE_SIZE;
+       mmun_start = addr;
+       mmun_end   = addr + PAGE_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
-       if (!page_vma_mapped_walk(&pvmw))
+       ptep = page_check_address(page, mm, addr, &ptl, 0);
+       if (!ptep)
                goto out_mn;
-       if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
-               goto out_unlock;
 
-       if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
+       if (pte_write(*ptep) || pte_dirty(*ptep)) {
                pte_t entry;
 
                swapped = PageSwapCache(page);
-               flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+               flush_cache_page(vma, addr, page_to_pfn(page));
                /*
                 * Ok this is tricky, when get_user_pages_fast() run it doesn't
                 * take any lock, therefore the check that we are going to make
@@ -894,25 +892,25 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                 * this assure us that no O_DIRECT can happen after the check
                 * or in the middle of the check.
                 */
-               entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
+               entry = ptep_clear_flush_notify(vma, addr, ptep);
                /*
                 * Check that no O_DIRECT or similar I/O is in progress on the
                 * page
                 */
                if (page_mapcount(page) + 1 + swapped != page_count(page)) {
-                       set_pte_at(mm, pvmw.address, pvmw.pte, entry);
+                       set_pte_at(mm, addr, ptep, entry);
                        goto out_unlock;
                }
                if (pte_dirty(entry))
                        set_page_dirty(page);
                entry = pte_mkclean(pte_wrprotect(entry));
-               set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
+               set_pte_at_notify(mm, addr, ptep, entry);
        }
-       *orig_pte = *pvmw.pte;
+       *orig_pte = *ptep;
        err = 0;
 
 out_unlock:
-       page_vma_mapped_walk_done(&pvmw);
+       pte_unmap_unlock(ptep, ptl);
 out_mn:
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out:
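
Both versions preserve KSM's pin detection: once ptep_clear_flush_notify()
has cleared the PTE, the page should be referenced only by its remaining
PTE mappings (page_mapcount), by KSM itself (+1), and by the swap cache
when PageSwapCache was set (swapped). A hypothetical helper spelling out
that accounting (illustrative only; no such function exists in the tree):

	static inline bool ksm_page_has_extra_pins(struct page *page,
						   int swapped)
	{
		/* expected: one ref per PTE mapping, one held by the
		 * caller, plus one for the swap cache when present */
		int expected = page_mapcount(page) + 1 + swapped;

		/* anything beyond that is a foreign pin, e.g. O_DIRECT
		 * holding the page via get_user_pages_fast() */
		return page_count(page) != expected;
	}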