mm: fold mlocked_vma_newpage() into its only call site
author Jianyu Zhan <nasa4836@gmail.com>
Wed, 4 Jun 2014 23:09:52 +0000 (16:09 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 4 Jun 2014 23:54:07 +0000 (16:54 -0700)
A previous commit ("mm: use the light version __mod_zone_page_state in
mlocked_vma_newpage()") switched this path to the irq-unsafe
__mod_zone_page_state().  As suggested by Andrew, to reduce the risk that
new call sites use mlocked_vma_newpage() without realizing they would be
introducing a race, this patch folds mlocked_vma_newpage() into its only
call site, page_add_new_anon_rmap(), so that the logic is open-coded and
it is clear what is going on.
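
For reference, the "light" helper simply skips the interrupt disabling that
the regular counter update pays for.  Roughly, the irq-safe wrapper in
mm/vmstat.c of this era looks like the sketch below (simplified, not a
verbatim copy):

	/*
	 * Simplified sketch: mod_zone_page_state() is the irq-safe wrapper
	 * around __mod_zone_page_state(); the difference is the
	 * local_irq_save()/local_irq_restore() pair, which is what the
	 * "light" call used in this patch avoids.
	 */
	void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				 int delta)
	{
		unsigned long flags;

		local_irq_save(flags);
		__mod_zone_page_state(zone, item, delta);
		local_irq_restore(flags);
	}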

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Suggested-by: Hugh Dickins <hughd@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/internal.h
mm/rmap.c

index e067984bafa07c4ab34ef32419406c92e95a4902..802c3a4fc03aef572aef92f2c6315f78632fcf86 100644 (file)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -188,31 +188,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
        munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
 
-/*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma.  If it is, mark page as mlocked.
- */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-                                   struct page *page)
-{
-       VM_BUG_ON_PAGE(PageLRU(page), page);
-
-       if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-               return 0;
-
-       if (!TestSetPageMlocked(page)) {
-               /*
-                * We use the irq-unsafe __mod_zone_page_stat because this
-                * counter is not modified from interrupt context, and the pte
-                * lock is held(spinlock), which implies preemption disabled.
-                */
-               __mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                   hpage_nr_pages(page));
-               count_vm_event(UNEVICTABLE_PGMLOCKED);
-       }
-       return 1;
-}
-
 /*
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
@@ -255,10 +230,6 @@ extern unsigned long vma_address(struct page *page,
                                 struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
-{
-       return 0;
-}
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
index 4644e10248f0f096c631484877be64a95194dbf2..e375ce4bd93e335678c6152313f9259ecaae9ce9 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1032,11 +1032,25 @@ void page_add_new_anon_rmap(struct page *page,
        __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
                        hpage_nr_pages(page));
        __page_set_anon_rmap(page, vma, address, 1);
-       if (!mlocked_vma_newpage(vma, page)) {
+
+       VM_BUG_ON_PAGE(PageLRU(page), page);
+       if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
                SetPageActive(page);
                lru_cache_add(page);
-       } else
-               add_page_to_unevictable_list(page);
+               return;
+       }
+
+       if (!TestSetPageMlocked(page)) {
+               /*
+                * We use the irq-unsafe __mod_zone_page_stat because this
+                * counter is not modified from interrupt context, and the pte
+                * lock is held(spinlock), which implies preemption disabled.
+                */
+               __mod_zone_page_state(page_zone(page), NR_MLOCK,
+                                   hpage_nr_pages(page));
+               count_vm_event(UNEVICTABLE_PGMLOCKED);
+       }
+       add_page_to_unevictable_list(page);
 }
 
 /**
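
For context, the comment's claim that the pte lock is held comes from the
caller: page_add_new_anon_rmap() runs from the anonymous fault path with the
page table spinlock taken.  A heavily abridged sketch of do_anonymous_page()
in mm/memory.c of this era (error handling, checks and charge/uncharge paths
omitted; an illustration, not a verbatim copy):

	static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *page_table, pmd_t *pmd,
			unsigned int flags)
	{
		struct page *page = alloc_zeroed_user_highpage_movable(vma, address);
		pte_t entry = mk_pte(page, vma->vm_page_prot);
		spinlock_t *ptl;

		/* Take the pte spinlock; preemption is disabled from here on. */
		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
		inc_mm_counter_fast(mm, MM_ANONPAGES);
		/* pte lock held: the irq-unsafe __mod_zone_page_state() above is safe */
		page_add_new_anon_rmap(page, vma, address);
		set_pte_at(mm, address, page_table, entry);
		pte_unmap_unlock(page_table, ptl);
		return 0;
	}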