mm/mremap: it is sure to have enough space when extent meets requirement
author     Wei Yang <richard.weiyang@linux.alibaba.com>
           Fri, 7 Aug 2020 06:23:40 +0000 (23:23 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 7 Aug 2020 18:33:27 +0000 (11:33 -0700)
Patch series "mm/mremap: cleanup move_page_tables() a little", v5.

move_page_tables() tries to move page table entries at PMD granularity,
falling back to PTE granularity when it cannot.

The root reason is that a PMD-level move requires both the old and the
new range to be PMD aligned, but the current code calculates the old
range and the new range separately.  This leads to some redundant
checks and calculations.

This cleanup consolidates the range checks in one place to cut down on
the extra range handling.
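
For context, a condensed sketch of the loop in move_page_tables() that
this series targets (a paraphrase with locking, error handling and the
THP path elided, not the verbatim source):

    for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
            /* extent is first derived from the old range ... */
            next = (old_addr + PMD_SIZE) & PMD_MASK;
            extent = next - old_addr;
            if (extent > old_end - old_addr)
                    extent = old_end - old_addr;

            /* ... PMD-level moves are attempted here when extent
             * covers exactly one PMD (elided) ... */

            /* ... and extent is clamped a second time against the
             * new range before falling back to moving PTEs. */
            next = (new_addr + PMD_SIZE) & PMD_MASK;
            if (extent > next - new_addr)
                    extent = next - new_addr;
            move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                      new_vma, new_pmd, new_addr, need_rmap_locks);
    }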

This patch (of 3):

old_end is passed to these two functions so they can check whether
there is enough space to do the move, but this check has already been
done before they are invoked.

These two functions are only invoked when the extent meets the size
requirement, and the caller clamps the extent beforehand:

    if (extent > old_end - old_addr)
        extent = old_end - old_addr;

This clamp guarantees extent <= old_end - old_addr.  Since these two
functions are only called when extent equals PMD_SIZE (respectively
HPAGE_PMD_SIZE), old_end - old_addr must be at least that large, so the
size check inside them can never fail and old_end no longer needs to be
passed in.
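
To make the arithmetic concrete, here is a small self-contained
userspace sketch of the same guarantee (PMD_SIZE and the addresses
below are illustrative values chosen for the example, not taken from
any particular kernel config):

    #include <assert.h>
    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)    /* 2 MiB, the usual x86-64 value */

    /* Mirrors the caller's clamp: extent never exceeds old_end - old_addr. */
    static unsigned long clamp_extent(unsigned long extent,
                                      unsigned long old_addr,
                                      unsigned long old_end)
    {
            if (extent > old_end - old_addr)
                    extent = old_end - old_addr;
            return extent;
    }

    int main(void)
    {
            unsigned long old_addr = 0x200000, old_end = 0x600000;
            unsigned long extent = clamp_extent(PMD_SIZE, old_addr, old_end);

            /* The PMD paths are only taken when extent == PMD_SIZE ... */
            if (extent == PMD_SIZE) {
                    /* ... which forces old_end - old_addr >= PMD_SIZE,
                     * so the removed size check could never have fired. */
                    assert(old_end - old_addr >= PMD_SIZE);
                    printf("in-function size check is redundant\n");
            }
            return 0;
    }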

Signed-off-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Cc: Thomas Hellstrom (VMware) <thomas_os@shipmail.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Link: http://lkml.kernel.org/r/20200710092835.56368-1-richard.weiyang@linux.alibaba.com
Link: http://lkml.kernel.org/r/20200710092835.56368-2-richard.weiyang@linux.alibaba.com
Link: http://lkml.kernel.org/r/20200708095028.41706-1-richard.weiyang@linux.alibaba.com
Link: http://lkml.kernel.org/r/20200708095028.41706-2-richard.weiyang@linux.alibaba.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c
mm/mremap.c

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 71f20776b06ca300a1562aadf3e733302f3e7831..17c4c49751451f4d7e5a652895a5819fb7b79be7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
-                        unsigned long new_addr, unsigned long old_end,
+                        unsigned long new_addr,
                         pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 78c84bee7e294f3bb48e43b1015fd82c077c5698..1e580fdad4d039a3e97a3cbffd7d84da15cff219 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1722,17 +1722,14 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
 }
 
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
-                 unsigned long new_addr, unsigned long old_end,
-                 pmd_t *old_pmd, pmd_t *new_pmd)
+                 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
 {
        spinlock_t *old_ptl, *new_ptl;
        pmd_t pmd;
        struct mm_struct *mm = vma->vm_mm;
        bool force_flush = false;
 
-       if ((old_addr & ~HPAGE_PMD_MASK) ||
-           (new_addr & ~HPAGE_PMD_MASK) ||
-           old_end - old_addr < HPAGE_PMD_SIZE)
+       if ((old_addr & ~HPAGE_PMD_MASK) || (new_addr & ~HPAGE_PMD_MASK))
                return false;
 
        /*
diff --git a/mm/mremap.c b/mm/mremap.c
index 6b153dc05fe4882c76eaac4150c8c20e12956af3..4f434062d154030f5ce3a6757b1fe31eb0c7d45e 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -193,15 +193,13 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
 #ifdef CONFIG_HAVE_MOVE_PMD
 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
-                 unsigned long new_addr, unsigned long old_end,
-                 pmd_t *old_pmd, pmd_t *new_pmd)
+                 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
 {
        spinlock_t *old_ptl, *new_ptl;
        struct mm_struct *mm = vma->vm_mm;
        pmd_t pmd;
 
-       if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
-           || old_end - old_addr < PMD_SIZE)
+       if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK))
                return false;
 
        /*
@@ -292,7 +290,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                                if (need_rmap_locks)
                                        take_rmap_locks(vma);
                                moved = move_huge_pmd(vma, old_addr, new_addr,
-                                                   old_end, old_pmd, new_pmd);
+                                                     old_pmd, new_pmd);
                                if (need_rmap_locks)
                                        drop_rmap_locks(vma);
                                if (moved)
@@ -312,7 +310,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                        if (need_rmap_locks)
                                take_rmap_locks(vma);
                        moved = move_normal_pmd(vma, old_addr, new_addr,
-                                       old_end, old_pmd, new_pmd);
+                                               old_pmd, new_pmd);
                        if (need_rmap_locks)
                                drop_rmap_locks(vma);
                        if (moved)