git.proxmox.com Git - mirror_ubuntu-disco-kernel.git/commitdiff
x86-64: use vmemmap_populate_basepages() for !pse setups
author    Johannes Weiner <hannes@cmpxchg.org>
Mon, 29 Apr 2013 22:07:54 +0000 (15:07 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 29 Apr 2013 22:54:35 +0000 (15:54 -0700)
We already have generic code to allocate vmemmap with regular pages, use
it.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David Miller <davem@davemloft.net>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
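
For context, the generic path this patch falls back to for !PSE CPUs is the
architecture-independent vmemmap_populate_basepages() in mm/sparse-vmemmap.c,
which maps the vmemmap one base page at a time. The sketch below is a
reconstruction of roughly how that helper looked in kernels of this era; it is
not part of this diff and details may differ slightly from the tree in question:

    /* Sketch of the generic helper in mm/sparse-vmemmap.c (reconstructed). */
    int __meminit vmemmap_populate_basepages(unsigned long start,
                                             unsigned long end, int node)
    {
            unsigned long addr = start;
            pgd_t *pgd;
            pud_t *pud;
            pmd_t *pmd;
            pte_t *pte;

            /* Walk and, if necessary, allocate page tables for every
             * PAGE_SIZE step of the vmemmap range. */
            for (; addr < end; addr += PAGE_SIZE) {
                    pgd = vmemmap_pgd_populate(addr, node);
                    if (!pgd)
                            return -ENOMEM;
                    pud = vmemmap_pud_populate(pgd, addr, node);
                    if (!pud)
                            return -ENOMEM;
                    pmd = vmemmap_pmd_populate(pud, addr, node);
                    if (!pmd)
                            return -ENOMEM;
                    pte = vmemmap_pte_populate(pmd, addr, node);
                    if (!pte)
                            return -ENOMEM;
            }

            return 0;
    }

With that helper available, the x86-64 code below only needs to keep the
PMD-sized (hugepage) mapping path and dispatch on cpu_has_pse.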
arch/x86/mm/init_64.c

index 8c696c9cc6c169bd813aedcc61eef73c119fb436..9f6347c468b005f9fa0239815e3726d16ad89baf 100644
@@ -1281,7 +1281,8 @@ static long __meminitdata addr_start, addr_end;
 static void __meminitdata *p_start, *p_end;
 static int __meminitdata node_start;
 
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+static int __meminit vmemmap_populate_hugepages(unsigned long start,
+                                               unsigned long end, int node)
 {
        unsigned long addr;
        unsigned long next;
@@ -1290,7 +1291,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
        pmd_t *pmd;
 
        for (addr = start; addr < end; addr = next) {
-               void *p = NULL;
+               next = pmd_addr_end(addr, end);
 
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
@@ -1300,53 +1301,50 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
                if (!pud)
                        return -ENOMEM;
 
-               if (!cpu_has_pse) {
-                       next = (addr + PAGE_SIZE) & PAGE_MASK;
-                       pmd = vmemmap_pmd_populate(pud, addr, node);
-
-                       if (!pmd)
-                               return -ENOMEM;
-
-                       p = vmemmap_pte_populate(pmd, addr, node);
+               pmd = pmd_offset(pud, addr);
+               if (pmd_none(*pmd)) {
+                       pte_t entry;
+                       void *p;
 
+                       p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;
-               } else {
-                       next = pmd_addr_end(addr, end);
-
-                       pmd = pmd_offset(pud, addr);
-                       if (pmd_none(*pmd)) {
-                               pte_t entry;
-
-                               p = vmemmap_alloc_block_buf(PMD_SIZE, node);
-                               if (!p)
-                                       return -ENOMEM;
-
-                               entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-                                               PAGE_KERNEL_LARGE);
-                               set_pmd(pmd, __pmd(pte_val(entry)));
-
-                               /* check to see if we have contiguous blocks */
-                               if (p_end != p || node_start != node) {
-                                       if (p_start)
-                                               printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
-                                                      addr_start, addr_end-1, p_start, p_end-1, node_start);
-                                       addr_start = addr;
-                                       node_start = node;
-                                       p_start = p;
-                               }
 
-                               addr_end = addr + PMD_SIZE;
-                               p_end = p + PMD_SIZE;
-                       } else
-                               vmemmap_verify((pte_t *)pmd, node, addr, next);
-               }
+                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+                                       PAGE_KERNEL_LARGE);
+                       set_pmd(pmd, __pmd(pte_val(entry)));
+
+                       /* check to see if we have contiguous blocks */
+                       if (p_end != p || node_start != node) {
+                               if (p_start)
+                                       printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+                                              addr_start, addr_end-1, p_start, p_end-1, node_start);
+                               addr_start = addr;
+                               node_start = node;
+                               p_start = p;
+                       }
 
+                       addr_end = addr + PMD_SIZE;
+                       p_end = p + PMD_SIZE;
+               } else
+                       vmemmap_verify((pte_t *)pmd, node, addr, next);
        }
-       sync_global_pgds(start, end - 1);
        return 0;
 }
 
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+{
+       int err;
+
+       if (cpu_has_pse)
+               err = vmemmap_populate_hugepages(start, end, node);
+       else
+               err = vmemmap_populate_basepages(start, end, node);
+       if (!err)
+               sync_global_pgds(start, end - 1);
+       return err;
+}
+
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
 void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)