{
	pte_t *pte;
	spinlock_t *ptl;
-	struct page *page;
+	struct folio *folio;
	struct damon_young_walk_private *priv = walk->private;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
-		page = damon_get_page(pmd_pfn(*pmd));
-		if (!page)
+		folio = damon_get_folio(pmd_pfn(*pmd));
+		if (!folio)
			goto huge_out;
-		if (pmd_young(*pmd) || !page_is_idle(page) ||
+		if (pmd_young(*pmd) || !folio_test_idle(folio) ||
				mmu_notifier_test_young(walk->mm,
					addr)) {
			*priv->page_sz = HPAGE_PMD_SIZE;
			priv->young = true;
		}
-		put_page(page);
+		folio_put(folio);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
-	page = damon_get_page(pte_pfn(*pte));
-	if (!page)
+	folio = damon_get_folio(pte_pfn(*pte));
+	if (!folio)
		goto out;
-	if (pte_young(*pte) || !page_is_idle(page) ||
+	if (pte_young(*pte) || !folio_test_idle(folio) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
-	put_page(page);
+	folio_put(folio);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
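
Not part of the patch: both hunks above follow the same folio-based pattern. They look the pfn up with damon_get_folio(), which is assumed to hand back a refcounted folio (or NULL) just as damon_get_page() handed back a refcounted page; they test the page-table access bit, the idle flag and the MMU notifiers; and they drop the reference with folio_put(). A minimal sketch of that shared check, with a hypothetical helper name:

/*
 * Sketch only, not part of the patch: the access check that the PMD and
 * PTE branches of damon_young_pmd_entry() above both perform.  The helper
 * name is hypothetical; damon_get_folio() is assumed to return a
 * refcounted folio for the pfn, or NULL, as damon_get_page() did for pages.
 */
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>

struct folio *damon_get_folio(unsigned long pfn);	/* introduced by this series */

static bool damon_young_check_folio(struct mm_struct *mm, unsigned long pfn,
				    unsigned long addr, bool hw_young)
{
	struct folio *folio = damon_get_folio(pfn);
	bool young;

	if (!folio)
		return false;
	/*
	 * Accessed if the page table bit was set (hw_young), the folio is
	 * not marked idle, or a secondary MMU saw an access.
	 */
	young = hw_young || !folio_test_idle(folio) ||
			mmu_notifier_test_young(mm, addr);
	folio_put(folio);
	return young;
}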