From e17eae2b839937817d771e2f5d2b30e5e2b81bb7 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 28 Jun 2021 19:36:43 -0700
Subject: [PATCH] mm: pagewalk: fix walk for hugepage tables

Pagewalk ignores hugepd entries and walks down the tables as if they
were traditional entries, leading to incorrect results.

Add walk_hugepd_range() and use it to walk hugepage tables.

Link: https://lkml.kernel.org/r/38d04410700c8d02f28ba37e020b62c55d6f3d2c.1624597695.git.christophe.leroy@csgroup.eu
Signed-off-by: Christophe Leroy
Reviewed-by: Steven Price
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Michael Ellerman
Cc: Daniel Axtens
Cc: "Oliver O'Halloran"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/pagewalk.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 53 insertions(+), 5 deletions(-)

diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index e81640d9f177..9b3db11a4d1d 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -58,6 +58,45 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	return err;
 }
 
+#ifdef CONFIG_ARCH_HAS_HUGEPD
+static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
+			     unsigned long end, struct mm_walk *walk, int pdshift)
+{
+	int err = 0;
+	const struct mm_walk_ops *ops = walk->ops;
+	int shift = hugepd_shift(*phpd);
+	int page_size = 1 << shift;
+
+	if (!ops->pte_entry)
+		return 0;
+
+	if (addr & (page_size - 1))
+		return 0;
+
+	for (;;) {
+		pte_t *pte;
+
+		spin_lock(&walk->mm->page_table_lock);
+		pte = hugepte_offset(*phpd, addr, pdshift);
+		err = ops->pte_entry(pte, addr, addr + page_size, walk);
+		spin_unlock(&walk->mm->page_table_lock);
+
+		if (err)
+			break;
+		if (addr >= end - page_size)
+			break;
+		addr += page_size;
+	}
+	return err;
+}
+#else
+static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
+			     unsigned long end, struct mm_walk *walk, int pdshift)
+{
+	return 0;
+}
+#endif
+
 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 			  struct mm_walk *walk)
 {
@@ -108,7 +147,10 @@ again:
 			goto again;
 		}
 
-		err = walk_pte_range(pmd, addr, next, walk);
+		if (is_hugepd(__hugepd(pmd_val(*pmd))))
+			err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
+		else
+			err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
 	} while (pmd++, addr = next, addr != end);
@@ -157,7 +199,10 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 		if (pud_none(*pud))
 			goto again;
 
-		err = walk_pmd_range(pud, addr, next, walk);
+		if (is_hugepd(__hugepd(pud_val(*pud))))
+			err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
+		else
+			err = walk_pmd_range(pud, addr, next, walk);
 		if (err)
 			break;
 	} while (pud++, addr = next, addr != end);
@@ -189,7 +234,9 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 			if (err)
 				break;
 		}
-		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
+		if (is_hugepd(__hugepd(p4d_val(*p4d))))
+			err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
+		else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
 			err = walk_pud_range(p4d, addr, next, walk);
 		if (err)
 			break;
@@ -224,8 +271,9 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
 			if (err)
 				break;
 		}
-		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
-		    ops->pte_entry)
+		if (is_hugepd(__hugepd(pgd_val(*pgd))))
+			err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
+		else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
 			err = walk_p4d_range(pgd, addr, next, walk);
 		if (err)
 			break;
-- 
2.39.5
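
A minimal, hypothetical pagewalk user, for illustration only (not part of
the commit): it registers a ->pte_entry callback through mm_walk_ops. With
this fix, on CONFIG_ARCH_HAS_HUGEPD architectures (e.g. some powerpc
configurations) the callback is also invoked once per hugepage for PTEs
kept in hugepd-backed tables, instead of the walker misreading the hugepd
entry as a regular page-table page. The helper and counter names below are
made up; the ops-based walk_page_range() API shown is the one in
v5.14-era kernels.

#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Called for each PTE; after this fix, also for hugepage PTEs. */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;	/* hypothetical accumulator */

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops count_present_ops = {
	.pte_entry	= count_present_pte,
};

/* Hypothetical helper: count present PTEs in [start, end) of @mm. */
static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	mmap_read_lock(mm);	/* walk_page_range() expects mmap_lock held */
	walk_page_range(mm, start, end, &count_present_ops, &count);
	mmap_read_unlock(mm);

	return count;
}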