diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 31e207cb399bebd11371e46eb26f625a5b74487c..424b0ef08a60cb616d40e9249e5447287c1705e3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1066,11 +1066,11 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 }
 
 static int __alloc_gigantic_page(unsigned long start_pfn,
-                               unsigned long nr_pages)
+                               unsigned long nr_pages, gfp_t gfp_mask)
 {
        unsigned long end_pfn = start_pfn + nr_pages;
        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
-                                 GFP_KERNEL);
+                                 gfp_mask);
 }
 
 static bool pfn_range_valid_gigantic(struct zone *z,
@@ -1108,19 +1108,24 @@ static bool zone_spans_last_pfn(const struct zone *zone,
        return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(int nid, unsigned int order)
+static struct page *alloc_gigantic_page(int nid, struct hstate *h)
 {
+       unsigned int order = huge_page_order(h);
        unsigned long nr_pages = 1 << order;
        unsigned long ret, pfn, flags;
-       struct zone *z;
+       struct zonelist *zonelist;
+       struct zone *zone;
+       struct zoneref *z;
+       gfp_t gfp_mask;
 
-       z = NODE_DATA(nid)->node_zones;
-       for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
-               spin_lock_irqsave(&z->lock, flags);
+       gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+       zonelist = node_zonelist(nid, gfp_mask);
+       for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), NULL) {
+               spin_lock_irqsave(&zone->lock, flags);
 
-               pfn = ALIGN(z->zone_start_pfn, nr_pages);
-               while (zone_spans_last_pfn(z, pfn, nr_pages)) {
-                       if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
+               pfn = ALIGN(zone->zone_start_pfn, nr_pages);
+               while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
+                       if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
                                /*
                                 * We release the zone lock here because
                                 * alloc_contig_range() will also lock the zone
@@ -1128,16 +1133,16 @@ static struct page *alloc_gigantic_page(int nid, unsigned int order)
                                 * spinning on this lock, it may win the race
                                 * and cause alloc_contig_range() to fail...
                                 */
-                               spin_unlock_irqrestore(&z->lock, flags);
-                               ret = __alloc_gigantic_page(pfn, nr_pages);
+                               spin_unlock_irqrestore(&zone->lock, flags);
+                               ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
                                if (!ret)
                                        return pfn_to_page(pfn);
-                               spin_lock_irqsave(&z->lock, flags);
+                               spin_lock_irqsave(&zone->lock, flags);
                        }
                        pfn += nr_pages;
                }
 
-               spin_unlock_irqrestore(&z->lock, flags);
+               spin_unlock_irqrestore(&zone->lock, flags);
        }
 
        return NULL;
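
The three hunks above switch alloc_gigantic_page() from a hand-rolled walk of the node's zone array to for_each_zone_zonelist_nodemask(), so the PFN scan only visits zones permitted by gfp_zone() of the hstate's allocation mask (htlb_alloc_mask(h) | __GFP_THISNODE), and that mask now reaches alloc_contig_range() instead of a hard-coded GFP_KERNEL. From userspace this allocator is exercised by reserving a gigantic page and mapping it with MAP_HUGETLB. A minimal demo, assuming an x86-64 machine with 1 GiB hugepage support and one page already reserved, e.g. via "echo 1 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages":

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB	(30 << 26)	/* log2(1 GiB) << MAP_HUGE_SHIFT */
#endif

int main(void)
{
	size_t len = 1UL << 30;	/* one gigantic page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB,
		       -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");	/* typically ENOMEM: no gigantic page reserved */
		return 1;
	}
	memset(p, 0, len);	/* touch the mapping so the page is faulted in */
	puts("mapped and touched one 1 GiB hugetlb page");
	munmap(p, len);
	return 0;
}
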
@@ -1150,7 +1155,7 @@ static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
 {
        struct page *page;
 
-       page = alloc_gigantic_page(nid, huge_page_order(h));
+       page = alloc_gigantic_page(nid, h);
        if (page) {
                prep_compound_gigantic_page(page, huge_page_order(h));
                prep_new_huge_page(h, page, nid);
@@ -2083,7 +2088,9 @@ struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
        return page;
 }
 
-int __weak alloc_bootmem_huge_page(struct hstate *h)
+int alloc_bootmem_huge_page(struct hstate *h)
+       __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
+int __alloc_bootmem_huge_page(struct hstate *h)
 {
        struct huge_bootmem_page *m;
        int nr_nodes, node;
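
This hunk trades the plain __weak definition for a weak alias: the generic implementation keeps a directly callable name, __alloc_bootmem_huge_page(), while alloc_bootmem_huge_page remains overridable, so an architecture can supply a strong definition that still delegates to the generic code (which a plain __weak function does not allow). A standalone GCC/ELF sketch of the same pattern; all names below are invented for illustration:

#include <stdio.h>

int do_alloc(void) __attribute__((weak, alias("__do_alloc")));

int __do_alloc(void)	/* the generic implementation, always reachable */
{
	puts("generic implementation");
	return 0;
}

/*
 * A second translation unit may provide a strong definition that
 * overrides the weak alias yet still calls the generic helper:
 *
 *	int __do_alloc(void);
 *	int do_alloc(void)
 *	{
 *		puts("arch override");
 *		return __do_alloc();
 *	}
 */

int main(void)
{
	return do_alloc();	/* whichever definition won at link time */
}
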
@@ -2569,13 +2576,13 @@ static struct attribute *hstate_attrs[] = {
        NULL,
 };
 
-static struct attribute_group hstate_attr_group = {
+static const struct attribute_group hstate_attr_group = {
        .attrs = hstate_attrs,
 };
 
 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
                                    struct kobject **hstate_kobjs,
-                                   struct attribute_group *hstate_attr_group)
+                                   const struct attribute_group *hstate_attr_group)
 {
        int retval;
        int hi = hstate_index(h);
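
This hunk and the next constify the attribute_group definitions so they can live in .rodata; sysfs_create_group() already takes a const struct attribute_group *, so only hugetlb's own plumbing (hugetlb_sysfs_add_hstate() above) needed its parameter type adjusted. A sketch of the registration shape, not a standalone module, with illustrative names:

static struct attribute *example_attrs[] = {
	&example_attr.attr,	/* the attribute array itself stays non-const */
	NULL,
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

/* at registration time, e.g. from an init path: */
	err = sysfs_create_group(kobj, &example_attr_group);
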
@@ -2633,7 +2640,7 @@ static struct attribute *per_node_hstate_attrs[] = {
        NULL,
 };
 
-static struct attribute_group per_node_hstate_attr_group = {
+static const struct attribute_group per_node_hstate_attr_group = {
        .attrs = per_node_hstate_attrs,
 };
 
@@ -4600,6 +4607,15 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
        return pte;
 }
 
+/*
+ * huge_pte_offset() - Walk the page table to resolve the hugepage
+ * entry at address @addr
+ *
+ * Return: Pointer to page table or swap entry (PUD or PMD) for
+ * address @addr, or NULL if a p*d_none() entry is encountered and the
+ * size @sz doesn't match the hugepage size at this level of the page
+ * table.
+ */
 pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
 {
@@ -4614,13 +4630,22 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
        p4d = p4d_offset(pgd, addr);
        if (!p4d_present(*p4d))
                return NULL;
+
        pud = pud_offset(p4d, addr);
-       if (!pud_present(*pud))
+       if (sz != PUD_SIZE && pud_none(*pud))
                return NULL;
-       if (pud_huge(*pud))
+       /* hugepage or swap? */
+       if (pud_huge(*pud) || !pud_present(*pud))
                return (pte_t *)pud;
+
        pmd = pmd_offset(pud, addr);
-       return (pte_t *) pmd;
+       if (sz != PMD_SIZE && pmd_none(*pmd))
+               return NULL;
+       /* hugepage or swap? */
+       if (pmd_huge(*pmd) || !pmd_present(*pmd))
+               return (pte_t *)pmd;
+
+       return NULL;
 }
 
 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
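
The rewritten walk gives huge_pte_offset() three distinguishable outcomes, matching the comment added above: NULL (a p*d_none() entry at a level whose size does not match @sz), a pointer to a present hugepage entry, or a pointer to a non-present swap/migration entry at PUD or PMD level. A hedged sketch of how a caller would tell them apart; lookup_huge_page() is a made-up name, and the required page table locking (e.g. huge_pte_lock()) is omitted:

static struct page *lookup_huge_page(struct mm_struct *mm, unsigned long addr,
				     struct hstate *h)
{
	pte_t *ptep, entry;

	ptep = huge_pte_offset(mm, addr, huge_page_size(h));
	if (!ptep)
		return NULL;	/* p*d_none() at a non-matching level */

	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry))
		return NULL;	/* mapped region, not yet faulted in */
	if (!pte_present(entry))
		return NULL;	/* swap or migration entry; caller must wait */

	return pte_page(entry);	/* present hugepage */
}
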