mm/hugetlb: convert restore_reserve_on_error() to folios
author Sidhartha Kumar <sidhartha.kumar@oracle.com>
Fri, 13 Jan 2023 22:30:56 +0000 (16:30 -0600)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 13 Feb 2023 23:54:28 +0000 (15:54 -0800)
Use the hugetlb folio flag macros inside restore_reserve_on_error() and
update the comments to reflect the use of folios.

Link: https://lkml.kernel.org/r/20230113223057.173292-8-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
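
The conversion follows one pattern: resolve the folio once with page_folio() at the top of the function, then use the folio flag helpers in place of the old HPage*() wrappers. Below is a minimal sketch of that pattern, not part of the patch; example_restore_reserve_flag() is a hypothetical caller used only to show the helper mapping, and it assumes the folio_*_hugetlb_restore_reserve() helpers provided by include/linux/hugetlb.h.

#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Illustrative sketch only (not from the patch): shows how the old
 * page-based flag wrappers map onto the folio helpers used below.
 */
static void example_restore_reserve_flag(struct page *page)
{
	struct folio *folio = page_folio(page);	/* resolve the folio once */

	/* was: if (HPageRestoreReserve(page)) */
	if (folio_test_hugetlb_restore_reserve(folio))
		/* was: ClearHPageRestoreReserve(page); */
		folio_clear_hugetlb_restore_reserve(folio);
	else
		/* was: SetHPageRestoreReserve(page); */
		folio_set_hugetlb_restore_reserve(folio);
}

The three call sites changed in the hunks below follow exactly this mapping; caching the folio in a local variable means the lookup is done once rather than at each flag access.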
mm/hugetlb.c

index 57894beb33828c1b3589a70b77096f2b21aee1cb..3120c3db60c4aa914c21b899a84ff5696b2713fe 100644
@@ -2819,22 +2819,23 @@ static long vma_del_reservation(struct hstate *h,
 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                        unsigned long address, struct page *page)
 {
+       struct folio *folio = page_folio(page);
        long rc = vma_needs_reservation(h, vma, address);
 
-       if (HPageRestoreReserve(page)) {
+       if (folio_test_hugetlb_restore_reserve(folio)) {
                if (unlikely(rc < 0))
                        /*
                         * Rare out of memory condition in reserve map
-                        * manipulation.  Clear HPageRestoreReserve so that
-                        * global reserve count will not be incremented
+                        * manipulation.  Clear hugetlb_restore_reserve so
+                        * that global reserve count will not be incremented
                         * by free_huge_page.  This will make it appear
-                        * as though the reservation for this page was
+                        * as though the reservation for this folio was
                         * consumed.  This may prevent the task from
-                        * faulting in the page at a later time.  This
+                        * faulting in the folio at a later time.  This
                         * is better than inconsistent global huge page
                         * accounting of reserve counts.
                         */
-                       ClearHPageRestoreReserve(page);
+                       folio_clear_hugetlb_restore_reserve(folio);
                else if (rc)
                        (void)vma_add_reservation(h, vma, address);
                else
@@ -2845,7 +2846,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                         * This indicates there is an entry in the reserve map
                         * not added by alloc_huge_page.  We know it was added
                         * before the alloc_huge_page call, otherwise
-                        * HPageRestoreReserve would be set on the page.
+                        * hugetlb_restore_reserve would be set on the folio.
                         * Remove the entry so that a subsequent allocation
                         * does not consume a reservation.
                         */
@@ -2854,12 +2855,12 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                                /*
                                 * VERY rare out of memory condition.  Since
                                 * we can not delete the entry, set
-                                * HPageRestoreReserve so that the reserve
-                                * count will be incremented when the page
+                                * hugetlb_restore_reserve so that the reserve
+                                * count will be incremented when the folio
                                 * is freed.  This reserve will be consumed
                                 * on a subsequent allocation.
                                 */
-                               SetHPageRestoreReserve(page);
+                               folio_set_hugetlb_restore_reserve(folio);
                } else if (rc < 0) {
                        /*
                         * Rare out of memory condition from
@@ -2875,12 +2876,12 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                                /*
                                 * For private mappings, no entry indicates
                                 * a reservation is present.  Since we can
-                                * not add an entry, set SetHPageRestoreReserve
-                                * on the page so reserve count will be
+                                * not add an entry, set hugetlb_restore_reserve
+                                * on the folio so reserve count will be
                                 * incremented when freed.  This reserve will
                                 * be consumed on a subsequent allocation.
                                 */
-                               SetHPageRestoreReserve(page);
+                               folio_set_hugetlb_restore_reserve(folio);
                } else
                        /*
                         * No reservation present, do nothing