mm/damon/vaddr: convert hugetlb related functions to use a folio
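
The conversion below follows one pattern throughout: look the folio up from the PFN (pfn_folio() or damon_get_folio()) instead of grabbing the page, then use the folio_* counterparts of the page refcount and idle-tracking helpers. As a rough, self-contained sketch of that pattern only (the helper name is invented for illustration and is not part of this commit; it mirrors the converted damon_hugetlb_mkold() minus the MMU-notifier call):

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

/* Hypothetical example, not in this patch: mark a hugetlb PTE's folio old. */
static void example_hugetlb_mkold(pte_t *ptep)
{
	pte_t entry = huge_ptep_get(ptep);
	struct folio *folio = pfn_folio(pte_pfn(entry));

	folio_get(folio);		/* hold a reference, as get_page() did */

	if (pte_young(entry))
		folio_set_young(folio);	/* record the observed access */

	folio_set_idle(folio);		/* re-arm idle tracking for the next check */
	folio_put(folio);		/* drop the reference, as put_page() did */
}
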
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 15f03df66db60e5db517bc73413a72e834f53563..9d92c5eb3a1fb83ca839078264cf00c937b396db 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -335,9 +335,9 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
 {
        bool referenced = false;
        pte_t entry = huge_ptep_get(pte);
-       struct page *page = pte_page(entry);
+       struct folio *folio = pfn_folio(pte_pfn(entry));
 
-       get_page(page);
+       folio_get(folio);
 
        if (pte_young(entry)) {
                referenced = true;
@@ -352,10 +352,10 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
 #endif /* CONFIG_MMU_NOTIFIER */
 
        if (referenced)
-               set_page_young(page);
+               folio_set_young(folio);
 
-       set_page_idle(page);
-       put_page(page);
+       folio_set_idle(folio);
+       folio_put(folio);
 }
 
 static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
@@ -431,7 +431,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 {
        pte_t *pte;
        spinlock_t *ptl;
-       struct page *page;
+       struct folio *folio;
        struct damon_young_walk_private *priv = walk->private;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -446,16 +446,16 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
                        spin_unlock(ptl);
                        goto regular_page;
                }
-               page = damon_get_page(pmd_pfn(*pmd));
-               if (!page)
+               folio = damon_get_folio(pmd_pfn(*pmd));
+               if (!folio)
                        goto huge_out;
-               if (pmd_young(*pmd) || !page_is_idle(page) ||
+               if (pmd_young(*pmd) || !folio_test_idle(folio) ||
                                        mmu_notifier_test_young(walk->mm,
                                                addr)) {
                        *priv->page_sz = HPAGE_PMD_SIZE;
                        priv->young = true;
                }
-               put_page(page);
+               folio_put(folio);
 huge_out:
                spin_unlock(ptl);
                return 0;
@@ -469,15 +469,15 @@ regular_page:
        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!pte_present(*pte))
                goto out;
-       page = damon_get_page(pte_pfn(*pte));
-       if (!page)
+       folio = damon_get_folio(pte_pfn(*pte));
+       if (!folio)
                goto out;
-       if (pte_young(*pte) || !page_is_idle(page) ||
+       if (pte_young(*pte) || !folio_test_idle(folio) ||
                        mmu_notifier_test_young(walk->mm, addr)) {
                *priv->page_sz = PAGE_SIZE;
                priv->young = true;
        }
-       put_page(page);
+       folio_put(folio);
 out:
        pte_unmap_unlock(pte, ptl);
        return 0;
@@ -490,7 +490,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
 {
        struct damon_young_walk_private *priv = walk->private;
        struct hstate *h = hstate_vma(walk->vma);
-       struct page *page;
+       struct folio *folio;
        spinlock_t *ptl;
        pte_t entry;
 
@@ -499,16 +499,16 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
        if (!pte_present(entry))
                goto out;
 
-       page = pte_page(entry);
-       get_page(page);
+       folio = pfn_folio(pte_pfn(entry));
+       folio_get(folio);
 
-       if (pte_young(entry) || !page_is_idle(page) ||
+       if (pte_young(entry) || !folio_test_idle(folio) ||
            mmu_notifier_test_young(walk->mm, addr)) {
                *priv->page_sz = huge_page_size(h);
                priv->young = true;
        }
 
-       put_page(page);
+       folio_put(folio);
 
 out:
        spin_unlock(ptl);