thp: mprotect: pass vma down to page table walkers
author     Johannes Weiner <hannes@cmpxchg.org>
           Thu, 13 Jan 2011 23:47:03 +0000 (15:47 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 14 Jan 2011 01:32:44 +0000 (17:32 -0800)
Flushing the tlb for huge pmds requires the vma's anon_vma, so pass along
the vma instead of the mm; we can always get the latter from the former when
we need it.
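The change is mechanical at each level of the walk: every walker now receives
the vma, and wherever the mm itself is still needed it is recovered as
vma->vm_mm. As a rough illustration of why the vma is the more useful handle
(example_walker below is a hypothetical helper, not part of this commit), a
walker holding the vma can reach both the mm and the anon_vma, and can issue
the same range flush that change_protection() already does:

#include <linux/mm.h>
#include <linux/rmap.h>
#include <asm/tlbflush.h>

/* Hypothetical walker: with the vma in hand, the mm and anon_vma are both in reach. */
static void example_walker(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;          /* mm is always recoverable from the vma */
        struct anon_vma *anon_vma = vma->anon_vma;  /* needed when flushing huge pmds */

        (void)mm;
        (void)anon_vma;

        flush_tlb_range(vma, start, end);           /* range flush takes the vma, not the mm */
}

Passing the vma is strictly more information than passing the mm, so later
huge-pmd handling, which needs the anon_vma for its tlb flush, can hook into
change_pmd_range() without another signature change.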

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/mprotect.c

index bd27db6b992b80c4c39d84b83d910996357727fd..9402080d0d4a88a622598b6dcfacb85f264204a8 100644
@@ -78,7 +78,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
        pte_unmap_unlock(pte - 1, ptl);
 }
 
-static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
+static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
 {
@@ -88,14 +88,15 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               split_huge_page_pmd(mm, pmd);
+               split_huge_page_pmd(vma->vm_mm, pmd);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
+               change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
+                                dirty_accountable);
        } while (pmd++, addr = next, addr != end);
 }
 
-static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
 {
@@ -107,7 +108,8 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
+               change_pmd_range(vma, pud, addr, next, newprot,
+                                dirty_accountable);
        } while (pud++, addr = next, addr != end);
 }
 
@@ -127,7 +129,8 @@ static void change_protection(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
+               change_pud_range(vma, pgd, addr, next, newprot,
+                                dirty_accountable);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_range(vma, start, end);
 }