mm: numa: call MMU notifiers on THP migration
Author:     Mel Gorman <mgorman@suse.de>
AuthorDate: Thu, 19 Dec 2013 01:08:33 +0000 (17:08 -0800)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Thu, 19 Dec 2013 03:04:51 +0000 (19:04 -0800)
MMU notifiers must be called on THP page migration, or secondary MMUs
(KVM being the main in-tree user) are left with stale mappings to the
old page.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
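
[ Editor's note: for context, the pairing this patch adds looks like the
  following in isolation. This is a minimal sketch, not code from the
  patch: remap_huge_pmd() is a hypothetical helper, and the
  three-argument invalidate calls match the mmu_notifier API of this
  kernel generation (later kernels pass a struct mmu_notifier_range
  instead).

	#include <linux/mm.h>
	#include <linux/huge_mm.h>
	#include <linux/mmu_notifier.h>

	/*
	 * Hypothetical helper showing the required bracketing: any path
	 * that rewrites a huge-page PMD must notify secondary MMUs before
	 * and after the update, or they keep stale translations.
	 */
	static void remap_huge_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   pmd_t *pmd, pmd_t newent,
				   unsigned long address)
	{
		spinlock_t *ptl;
		unsigned long mmun_start = address & HPAGE_PMD_MASK;
		unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;

		/* Secondary MMUs drop mappings for the range here... */
		mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

		ptl = pmd_lock(mm, pmd);
		pmdp_clear_flush(vma, mmun_start, pmd);  /* clear old entry, flush TLB */
		set_pmd_at(mm, mmun_start, pmd, newent); /* install the new mapping */
		spin_unlock(ptl);

		/* ...and may re-fault and rebuild them once we are done. */
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	}

  Note that the patch's error path (fail_putback) also calls
  mmu_notifier_invalidate_range_end(): once _start has run, every exit
  from the critical section must pair it with _end. ]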
diff --git a/mm/migrate.c b/mm/migrate.c
index 2cabbd5fa5bf4806e3f75b27119aff8cfc65f12b..be787d506fbbf3347da5b2a4f8e984b9f0450f18 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
 #include <linux/balloon_compaction.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 
@@ -1716,12 +1717,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                                struct page *page, int node)
 {
        spinlock_t *ptl;
-       unsigned long haddr = address & HPAGE_PMD_MASK;
        pg_data_t *pgdat = NODE_DATA(node);
        int isolated = 0;
        struct page *new_page = NULL;
        struct mem_cgroup *memcg = NULL;
        int page_lru = page_is_file_cache(page);
+       unsigned long mmun_start = address & HPAGE_PMD_MASK;
+       unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
        pmd_t orig_entry;
 
        /*
@@ -1756,10 +1758,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        WARN_ON(PageLRU(new_page));
 
        /* Recheck the target PMD */
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
 fail_putback:
                spin_unlock(ptl);
+               mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
                /* Reverse changes made by migrate_page_copy() */
                if (TestClearPageActive(new_page))
@@ -1800,15 +1804,16 @@ fail_putback:
         * The SetPageUptodate on the new page and page_add_new_anon_rmap
         * guarantee the copy is visible before the pagetable update.
         */
-       flush_cache_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-       page_add_new_anon_rmap(new_page, vma, haddr);
-       pmdp_clear_flush(vma, haddr, pmd);
-       set_pmd_at(mm, haddr, pmd, entry);
+       flush_cache_range(vma, mmun_start, mmun_end);
+       page_add_new_anon_rmap(new_page, vma, mmun_start);
+       pmdp_clear_flush(vma, mmun_start, pmd);
+       set_pmd_at(mm, mmun_start, pmd, entry);
+       flush_tlb_range(vma, mmun_start, mmun_end);
        update_mmu_cache_pmd(vma, address, &entry);
 
        if (page_count(page) != 2) {
-               set_pmd_at(mm, haddr, pmd, orig_entry);
-               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+               set_pmd_at(mm, mmun_start, pmd, orig_entry);
+               flush_tlb_range(vma, mmun_start, mmun_end);
                update_mmu_cache_pmd(vma, address, &entry);
                page_remove_rmap(new_page);
                goto fail_putback;
@@ -1823,6 +1828,7 @@ fail_putback:
         */
        mem_cgroup_end_migration(memcg, page, new_page, true);
        spin_unlock(ptl);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        unlock_page(new_page);
        unlock_page(page);
@@ -1843,7 +1849,7 @@ out_dropref:
        ptl = pmd_lock(mm, pmd);
        if (pmd_same(*pmd, entry)) {
                entry = pmd_mknonnuma(entry);
-               set_pmd_at(mm, haddr, pmd, entry);
+               set_pmd_at(mm, mmun_start, pmd, entry);
                update_mmu_cache_pmd(vma, address, &entry);
        }
        spin_unlock(ptl);
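
[ Editor's note: on the consumer side, a secondary MMU registers an
  mmu_notifier against the mm and reacts to these invalidations. A
  minimal sketch with the ops signatures of this kernel generation (the
  demo_* names are hypothetical; KVM's
  kvm_mmu_notifier_invalidate_range_start() is the real in-tree
  example):

	#include <linux/mmu_notifier.h>
	#include <linux/sched.h>

	/*
	 * Drop cached translations covering [start, end). Without the
	 * calls added above, this would never fire for THP migration and
	 * the secondary MMU would keep pointing at the old page.
	 */
	static void demo_invalidate_range_start(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
	{
		/* e.g. zap shadow page tables or IOMMU entries for the range */
	}

	static const struct mmu_notifier_ops demo_ops = {
		.invalidate_range_start = demo_invalidate_range_start,
	};

	static struct mmu_notifier demo_mn = { .ops = &demo_ops };

	/* Registered once per address space, e.g.:
	 *	mmu_notifier_register(&demo_mn, current->mm);
	 */
]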