mm/hugetlb: convert alloc_surplus_huge_page() to folios
author Sidhartha Kumar <sidhartha.kumar@oracle.com>
Fri, 13 Jan 2023 22:30:53 +0000 (16:30 -0600)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 13 Feb 2023 23:54:27 +0000 (15:54 -0800)
Change alloc_surplus_huge_page() to alloc_surplus_hugetlb_folio() and
update its callers.

Link: https://lkml.kernel.org/r/20230113223057.173292-5-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
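
For readers skimming the patch, the shape of the conversion is: alloc_surplus_hugetlb_folio() now returns a struct folio, and callers that must keep a page-based interface convert back at the boundary with &folio->page, as alloc_buddy_huge_page_with_mpol() does in the hunks below. A minimal caller-side sketch of that pattern follows; demo_caller() is a hypothetical illustration, not part of the patch, and the real allocator is static to mm/hugetlb.c, so an out-of-file caller like this could not exist as written.

#include <linux/hugetlb.h>	/* struct hstate, htlb_alloc_mask() */
#include <linux/mm.h>		/* struct folio, struct page */
#include <linux/nodemask.h>	/* nodemask_t */

/*
 * Illustrative only: a page-based caller of the folio-returning allocator
 * introduced by this patch. Mirrors alloc_buddy_huge_page_with_mpol(),
 * which converts the folio back to a page at its return boundary.
 */
static struct page *demo_caller(struct hstate *h, int nid, nodemask_t *nmask)
{
	struct folio *folio;

	/* The allocator now hands back a struct folio, not a head page. */
	folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), nid, nmask);
	if (!folio)
		return NULL;

	/* Page-based interfaces convert at the boundary. */
	return &folio->page;
}
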
mm/hugetlb.c

index 3e648fccf33e8aa5e4efed29a61b7eccb50f97c7..fa61b4aa68cae7ff5ebb94b004bb2aae2eba0c11 100644
@@ -2378,8 +2378,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 /*
  * Allocates a fresh surplus page from the page allocator.
  */
-static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
-                                               int nid, nodemask_t *nmask)
+static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
+                               gfp_t gfp_mask, int nid, nodemask_t *nmask)
 {
        struct folio *folio = NULL;
 
@@ -2416,7 +2416,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
 out_unlock:
        spin_unlock_irq(&hugetlb_lock);
 
-       return &folio->page;
+       return folio;
 }
 
 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
@@ -2449,7 +2449,7 @@ static
 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
                struct vm_area_struct *vma, unsigned long addr)
 {
-       struct page *page = NULL;
+       struct folio *folio = NULL;
        struct mempolicy *mpol;
        gfp_t gfp_mask = htlb_alloc_mask(h);
        int nid;
@@ -2460,16 +2460,16 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
                gfp_t gfp = gfp_mask | __GFP_NOWARN;
 
                gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-               page = alloc_surplus_huge_page(h, gfp, nid, nodemask);
+               folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
 
                /* Fallback to all nodes if page==NULL */
                nodemask = NULL;
        }
 
-       if (!page)
-               page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
+       if (!folio)
+               folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
        mpol_cond_put(mpol);
-       return page;
+       return &folio->page;
 }
 
 /* page migration callback function */
@@ -2518,6 +2518,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
        __must_hold(&hugetlb_lock)
 {
        LIST_HEAD(surplus_list);
+       struct folio *folio;
        struct page *page, *tmp;
        int ret;
        long i;
@@ -2537,13 +2538,13 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 retry:
        spin_unlock_irq(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
-               page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
+               folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
                                NUMA_NO_NODE, NULL);
-               if (!page) {
+               if (!folio) {
                        alloc_ok = false;
                        break;
                }
-               list_add(&page->lru, &surplus_list);
+               list_add(&folio->lru, &surplus_list);
                cond_resched();
        }
        allocated += i;
@@ -3496,7 +3497,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
         * First take pages out of surplus state.  Then make up the
         * remaining difference by allocating fresh huge pages.
         *
-        * We might race with alloc_surplus_huge_page() here and be unable
+        * We might race with alloc_surplus_hugetlb_folio() here and be unable
         * to convert a surplus huge page to a normal huge page. That is
         * not critical, though, it just means the overall size of the
         * pool might be one hugepage larger than it needs to be, but
@@ -3539,7 +3540,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
         * By placing pages into the surplus state independent of the
         * overcommit value, we are allowing the surplus pool size to
         * exceed overcommit. There are few sane options here. Since
-        * alloc_surplus_huge_page() is checking the global counter,
+        * alloc_surplus_hugetlb_folio() is checking the global counter,
         * though, we'll note that we're not allowed to exceed surplus
         * and won't grow the pool anywhere else. Not until one of the
         * sysctls are changed, or the surplus pages go out of use.
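
A subtlety in the gather_surplus_pages() hunk: folios are now queued with list_add(&folio->lru, &surplus_list), while the function still keeps struct page *page, *tmp for the rest of its list handling (not shown in this excerpt). That works because folio->lru overlays the head page's lru field, an invariant mm_types.h checks with its FOLIO_MATCH() asserts. A kernel-context sketch of the same check, illustrative and not part of the patch:

#include <linux/build_bug.h>	/* static_assert() */
#include <linux/mm_types.h>	/* struct page, struct folio */
#include <linux/stddef.h>	/* offsetof() */

/*
 * Illustrative: folio->lru and the head page's lru share one offset, so a
 * list built with list_add(&folio->lru, ...) can still be walked through
 * page->lru, as the unchanged remainder of gather_surplus_pages() does.
 */
static_assert(offsetof(struct folio, lru) == offsetof(struct page, lru),
	      "folio->lru must overlay the head page's lru");
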