thp: cleanup split_huge_page()
author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
	Tue, 15 Mar 2016 21:57:30 +0000 (14:57 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Tue, 15 Mar 2016 23:55:16 +0000 (16:55 -0700)
After one of the bugfixes to freeze_page(), we no longer leave frozen
pages in the rmap, so the mapcount of every subpage of a frozen THP is
zero.  And we have an assert for that.

Let's drop the code which deals with a non-zero mapcount of subpages.
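
For readers outside mm/, here is a minimal userspace sketch of the
_mapcount convention the new assert relies on (mock struct and helper
names, not kernel code): struct page stores the mapcount biased by -1,
so a tail page that freeze_page() has fully unmapped reads -1, i.e.
mapcount zero.

#include <assert.h>
#include <stdatomic.h>

/* Userspace mock of struct page's biased _mapcount field. */
struct mock_page {
	atomic_int _mapcount;	/* stores mapcount - 1, so -1 means unmapped */
};

static int mock_page_mapcount(struct mock_page *p)
{
	return atomic_load(&p->_mapcount) + 1;
}

int main(void)
{
	/* freeze_page() has unmapped the THP, so every tail reads -1 */
	struct mock_page tail = { ._mapcount = -1 };

	/* the invariant __split_huge_page_tail() now asserts: */
	assert(atomic_load(&tail._mapcount) == -1);
	assert(mock_page_mapcount(&tail) == 0);
	return 0;
}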

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/huge_memory.c

index e10a4fee88d2bcf1f4bba89285391cbadfb96098..1ea21e203a700114587dedca262f9267bb5a005a 100644
@@ -3220,28 +3220,26 @@ static void unfreeze_page(struct anon_vma *anon_vma, struct page *page)
        }
 }
 
-static int __split_huge_page_tail(struct page *head, int tail,
+static void __split_huge_page_tail(struct page *head, int tail,
                struct lruvec *lruvec, struct list_head *list)
 {
-       int mapcount;
        struct page *page_tail = head + tail;
 
-       mapcount = atomic_read(&page_tail->_mapcount) + 1;
+       VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
        VM_BUG_ON_PAGE(atomic_read(&page_tail->_count) != 0, page_tail);
 
        /*
         * tail_page->_count is zero and not changing from under us. But
         * get_page_unless_zero() may be running from under us on the
-        * tail_page. If we used atomic_set() below instead of atomic_add(), we
+        * tail_page. If we used atomic_set() below instead of atomic_inc(), we
         * would then run atomic_set() concurrently with
         * get_page_unless_zero(), and atomic_set() is implemented in C not
         * using locked ops. spin_unlock on x86 sometime uses locked ops
         * because of PPro errata 66, 92, so unless somebody can guarantee
         * atomic_set() here would be safe on all archs (and not only on x86),
-        * it's safer to use atomic_add().
+        * it's safer to use atomic_inc().
         */
-       atomic_add(mapcount + 1, &page_tail->_count);
-
+       atomic_inc(&page_tail->_count);
 
        page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        page_tail->flags |= (head->flags &
@@ -3275,8 +3273,6 @@ static int __split_huge_page_tail(struct page *head, int tail,
        page_tail->index = head->index + tail;
        page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
        lru_add_page_tail(head, page_tail, lruvec, list);
-
-       return mapcount;
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list)
@@ -3284,7 +3280,7 @@ static void __split_huge_page(struct page *page, struct list_head *list)
        struct page *head = compound_head(page);
        struct zone *zone = page_zone(head);
        struct lruvec *lruvec;
-       int i, tail_mapcount;
+       int i;
 
        /* prevent PageLRU to go away from under us, and freeze lru stats */
        spin_lock_irq(&zone->lru_lock);
@@ -3293,10 +3289,8 @@ static void __split_huge_page(struct page *page, struct list_head *list)
        /* complete memcg works before add pages to LRU */
        mem_cgroup_split_huge_fixup(head);
 
-       tail_mapcount = 0;
        for (i = HPAGE_PMD_NR - 1; i >= 1; i--)
-               tail_mapcount += __split_huge_page_tail(head, i, lruvec, list);
-       atomic_sub(tail_mapcount, &head->_count);
+               __split_huge_page_tail(head, i, lruvec, list);
 
        ClearPageCompound(head);
        spin_unlock_irq(&zone->lru_lock);
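
The retained comment about atomic_set() vs. atomic_inc() can likewise
be sketched in userspace C (mock names, not the kernel implementation):
get_page_unless_zero() is an increment-unless-zero RMW, so the splitter
must also use a locked RMW on _count; a plain store could silently
overwrite a concurrent reader's increment.

#include <stdatomic.h>
#include <stdbool.h>

/* Mock of get_page_unless_zero(): take a reference unless _count is zero. */
static bool mock_get_page_unless_zero(atomic_int *count)
{
	int old = atomic_load(count);

	while (old != 0) {
		/* locked RMW; serializes with any other RMW on *count */
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;
	}
	return false;
}

/*
 * Splitter side: atomic_fetch_add() (atomic_inc() in the kernel) is
 * also a locked RMW, so neither update can be lost.  A plain
 * atomic_store() (atomic_set() in the kernel) would not serialize
 * with the CAS loop above and could overwrite its increment.
 */
static void mock_publish_tail_count(atomic_int *count)
{
	atomic_fetch_add(count, 1);
}

int main(void)
{
	atomic_int count = 0;	/* tail page starts with zero refs */

	/* a racing reader fails while the count is still zero... */
	bool got = mock_get_page_unless_zero(&count);
	mock_publish_tail_count(&count);
	/* ...and succeeds once the splitter has published one ref */
	got = mock_get_page_unless_zero(&count);
	return got ? 0 : 1;
}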