git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
mm/hugetlb: make follow_hugetlb_page() safe to pmd unshare
author: Peter Xu <peterx@redhat.com>
Fri, 16 Dec 2022 15:52:23 +0000 (10:52 -0500)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 19 Jan 2023 01:12:39 +0000 (17:12 -0800)
Since follow_hugetlb_page() walks the pgtable, it needs the vma lock to
make sure the pgtable page will not be freed concurrently.

Link: https://lkml.kernel.org/r/20221216155223.2043727-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index 807edc1410e576fa54691e4206e012f4fe788b77..da4c37553c08cc6c690128d15028636baa3fede4 100644 (file)
@@ -6454,6 +6454,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        break;
                }
 
+               hugetlb_vma_lock_read(vma);
                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts to
                 * each hugepage.  We have to make sure we get the
@@ -6478,6 +6479,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
                        if (pte)
                                spin_unlock(ptl);
+                       hugetlb_vma_unlock_read(vma);
                        remainder = 0;
                        break;
                }
@@ -6499,6 +6501,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
                        if (pte)
                                spin_unlock(ptl);
+                       hugetlb_vma_unlock_read(vma);
+
                        if (flags & FOLL_WRITE)
                                fault_flags |= FAULT_FLAG_WRITE;
                        else if (unshare)
@@ -6561,6 +6565,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        remainder -= pages_per_huge_page(h);
                        i += pages_per_huge_page(h);
                        spin_unlock(ptl);
+                       hugetlb_vma_unlock_read(vma);
                        continue;
                }
 
@@ -6590,6 +6595,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
                                                         flags))) {
                                spin_unlock(ptl);
+                               hugetlb_vma_unlock_read(vma);
                                remainder = 0;
                                err = -ENOMEM;
                                break;
@@ -6601,6 +6607,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                i += refs;
 
                spin_unlock(ptl);
+               hugetlb_vma_unlock_read(vma);
        }
        *nr_pages = remainder;
        /*