git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
pagewalk: add walk_page_vma()
author Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Wed, 11 Feb 2015 23:27:40 +0000 (15:27 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 12 Feb 2015 01:06:05 +0000 (17:06 -0800)
Introduce walk_page_vma(), which is useful for the callers which want to
walk over a given vma.  It's used by later patches.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/pagewalk.c

index 3891a368e5e041f2d66ce71e5c094a6802293735..a4d24f3c5430fefe78090cec27ff2bfe5992dcaa 100644 (file)
@@ -1201,6 +1201,7 @@ struct mm_walk {
 
 int walk_page_range(unsigned long addr, unsigned long end,
                struct mm_walk *walk);
+int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
index d9cc3caae802964397d84f31cf9b32af5cdd5a12..4c9a653ba563fd61533193a4bc6a72e78b89a86f 100644 (file)
@@ -272,3 +272,21 @@ int walk_page_range(unsigned long start, unsigned long end,
        } while (start = next, start < end);
        return err;
 }
+
+/*
+ * walk_page_vma - walk the page tables covering a single VMA
+ * @vma:  the vma to walk (must be non-NULL; enforced by VM_BUG_ON below)
+ * @walk: walk state; walk->mm must be set and its mmap_sem held by the
+ *        caller (enforced by the rwsem_is_locked check below)
+ *
+ * Returns 0 on success (including when the test hook elects to skip the
+ * vma), or a negative error code from the test hook or the range walk.
+ */
+int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
+{
+       int err;
+
+       if (!walk->mm)
+               return -EINVAL;
+
+       /* Caller must hold mmap_sem for walk->mm while we traverse. */
+       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+       VM_BUG_ON(!vma);
+       walk->vma = vma;
+       err = walk_page_test(vma->vm_start, vma->vm_end, walk);
+       /* Positive return from the test hook means "skip this vma". */
+       if (err > 0)
+               return 0;
+       if (err < 0)
+               return err;
+       return __walk_page_range(vma->vm_start, vma->vm_end, walk);
+}