thp: copy_huge_pmd(): copy huge zero page
author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
          Wed, 12 Dec 2012 21:50:51 +0000 (13:50 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 13 Dec 2012 01:38:31 +0000 (17:38 -0800)
It's easy to copy the huge zero page: just set the destination pmd to the
huge zero page.

It's safe to copy the huge zero page since nothing maps it yet :-p

[rientjes@google.com: fix comment]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1ee34ddb46adc77b36e676eea28368eed3eef1b2..650625390f616ddb1a17e8e072bd76476e675199 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -708,6 +708,18 @@ static inline struct page *alloc_hugepage(int defrag)
 }
 #endif
 
+static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd)
+{
+       pmd_t entry;
+       entry = pfn_pmd(huge_zero_pfn, vma->vm_page_prot);
+       entry = pmd_wrprotect(entry);
+       entry = pmd_mkhuge(entry);
+       set_pmd_at(mm, haddr, pmd, entry);
+       pgtable_trans_huge_deposit(mm, pgtable);
+       mm->nr_ptes++;
+}
+
 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               unsigned int flags)
@@ -785,6 +797,16 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_free(dst_mm, pgtable);
                goto out_unlock;
        }
+       /*
+        * mm->page_table_lock is enough to be sure that huge zero pmd is not
+        * under splitting since we don't split the page itself, only pmd to
+        * a page table.
+        */
+       if (is_huge_zero_pmd(pmd)) {
+               set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd);
+               ret = 0;
+               goto out_unlock;
+       }
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(&src_mm->page_table_lock);
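
For context only (not part of the commit): a rough user-space sketch of the path the
new is_huge_zero_pmd() branch covers. It assumes a kernel with CONFIG_TRANSPARENT_HUGEPAGE
and the huge-zero-page series applied; whether the huge zero page is actually installed
also depends on the mapping being pmd-aligned and on
/sys/kernel/mm/transparent_hugepage/enabled, so treat it as a demonstration harness, not
a guaranteed reproducer. The parent touches an anonymous mapping read-only, which may
install the huge zero page; fork() then reaches copy_huge_pmd(), which after this patch
simply points the child's pmd at the same zero page instead of falling back to splitting.

/*
 * Hypothetical demo (file name and sizes are illustrative):
 *   gcc -O2 -o hzp-fork-demo hzp-fork-demo.c
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define SZ	(2UL << 20)	/* one pmd-sized region (2 MiB on x86_64) */

int main(void)
{
	char *p = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Hint THP; the huge zero page is only used if the range happens
	 * to be pmd-aligned and the feature is enabled. */
	madvise(p, SZ, MADV_HUGEPAGE);

	volatile char c = p[0];	/* read-only fault: may install the huge zero page */
	(void)c;

	pid_t pid = fork();	/* the parent's huge pmds are copied via copy_huge_pmd() */
	if (pid == 0) {
		/* The child's pmd can point at the same huge zero page,
		 * so this read returns 0 without allocating any memory. */
		printf("child read: %d\n", p[SZ / 2]);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	munmap(p, SZ);
	return 0;
}

Note the design choice visible in the first hunk: set_huge_zero_page() write-protects the
entry via pmd_wrprotect(), so a later write in either process takes a write fault and gets
backed by real pages instead of writing to the shared zero page.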