mm/hugetlb: refactor subpage recording
author      Joao Martins <joao.m.martins@oracle.com>
            Wed, 24 Feb 2021 20:07:16 +0000 (12:07 -0800)
committer   Linus Torvalds <torvalds@linux-foundation.org>
            Wed, 24 Feb 2021 21:38:32 +0000 (13:38 -0800)
For a given hugepage backing a VA, there's a rather inefficient loop which
is solely responsible for storing subpages in the GUP @pages/@vmas arrays.
For each subpage we check whether it's within range or size of @pages and
keep incrementing @pfn_offset and a couple of other variables per subpage
iteration.

Simplify this logic and minimize the cost of each iteration to just
storing the output page/vma.  Instead of incrementing @refs iteratively,
pre-calculate @refs up front and keep only a tight loop for storing the
pinned subpages/vmas.
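
For illustration, the refactored flow in follow_hugetlb_page() boils down
to the sketch below (a simplified excerpt of the hunk further down, with
the three min3() bounds annotated):

	/* How many subpages can be recorded in one go: bounded by what is
	 * left of this hugepage, what is left of the VMA, and what is left
	 * of the caller's request.
	 */
	refs = min3(pages_per_huge_page(h) - pfn_offset,
		    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);

	if (pages || vmas)
		record_subpages_vmas(mem_map_offset(page, pfn_offset),
				     vma, refs,
				     pages ? pages + i : NULL,
				     vmas ? vmas + i : NULL);

	/* The per-subpage counter updates now happen once per chunk. */
	vaddr += (refs << PAGE_SHIFT);
	remainder -= refs;
	i += refs;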

Additionally, retain the existing behaviour of using mem_map_offset() when
recording the subpages, for configurations that don't have a contiguous
mem_map.
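
(For reference, mem_map_offset() as defined in mm/internal.h around this
point in the tree is roughly the helper below; it falls back to nth_page()
once the offset crosses a MAX_ORDER_NR_PAGES boundary, which is what keeps
the recording correct when mem_map is not contiguous:)

	/* mm/internal.h (paraphrased): return the mem_map entry for the
	 * 'offset' subpage of 'base', tolerating mem_map discontinuities
	 * at MAX_ORDER_NR_PAGES boundaries.
	 */
	static inline struct page *mem_map_offset(struct page *base, int offset)
	{
		if (unlikely(offset >= MAX_ORDER_NR_PAGES))
			return nth_page(base, offset);
		return base + offset;
	}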

Pinning consequently improves, bringing us close to
{pin,get}_user_pages_fast:

  - 16G with 1G huge page size
  gup_test -f /mnt/huge/file -m 16384 -r 30 -L -S -n 512 -w

PIN_LONGTERM_BENCHMARK: ~12.8k us -> ~5.8k us
PIN_FAST_BENCHMARK: ~3.7k us

Link: https://lkml.kernel.org/r/20210128182632.24562-3-joao.m.martins@oracle.com
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/hugetlb.c

index d4fc5db6bc3224814db47ff4e39c723b1966f37c..cf6653879bb4b04761a54fb245c320dae9fc9b4a 100644
@@ -4787,6 +4787,20 @@ out_release_nounlock:
        goto out;
 }
 
+static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
+                                int refs, struct page **pages,
+                                struct vm_area_struct **vmas)
+{
+       int nr;
+
+       for (nr = 0; nr < refs; nr++) {
+               if (likely(pages))
+                       pages[nr] = mem_map_offset(page, nr);
+               if (vmas)
+                       vmas[nr] = vma;
+       }
+}
+
 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page **pages, struct vm_area_struct **vmas,
                         unsigned long *position, unsigned long *nr_pages,
@@ -4916,28 +4930,16 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        continue;
                }
 
-               refs = 0;
+               refs = min3(pages_per_huge_page(h) - pfn_offset,
+                           (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
 
-same_page:
-               if (pages)
-                       pages[i] = mem_map_offset(page, pfn_offset);
+               if (pages || vmas)
+                       record_subpages_vmas(mem_map_offset(page, pfn_offset),
+                                            vma, refs,
+                                            likely(pages) ? pages + i : NULL,
+                                            vmas ? vmas + i : NULL);
 
-               if (vmas)
-                       vmas[i] = vma;
-
-               vaddr += PAGE_SIZE;
-               ++pfn_offset;
-               --remainder;
-               ++i;
-               ++refs;
-               if (vaddr < vma->vm_end && remainder &&
-                               pfn_offset < pages_per_huge_page(h)) {
-                       /*
-                        * We use pfn_offset to avoid touching the pageframes
-                        * of this compound page.
-                        */
-                       goto same_page;
-               } else if (pages) {
+               if (pages) {
                        /*
                         * try_grab_compound_head() should always succeed here,
                         * because: a) we hold the ptl lock, and b) we've just
@@ -4948,7 +4950,7 @@ same_page:
                         * any way. So this page must be available at this
                         * point, unless the page refcount overflowed:
                         */
-                       if (WARN_ON_ONCE(!try_grab_compound_head(pages[i-1],
+                       if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
                                                                 refs,
                                                                 flags))) {
                                spin_unlock(ptl);
@@ -4957,6 +4959,11 @@ same_page:
                                break;
                        }
                }
+
+               vaddr += (refs << PAGE_SHIFT);
+               remainder -= refs;
+               i += refs;
+
                spin_unlock(ptl);
        }
        *nr_pages = remainder;