mm: fix page table unmap for stack guard page properly
author	Linus Torvalds <torvalds@linux-foundation.org>
	Sat, 14 Aug 2010 18:44:56 +0000 (11:44 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Sat, 14 Aug 2010 18:44:56 +0000 (11:44 -0700)
We do in fact need to unmap the page table _before_ doing the whole
stack guard page logic, because if highmem page tables are in use
(mainly 32-bit x86 with PAE and CONFIG_HIGHPTE, but other architectures
may use them too) the page table map and unmap are done with
kmap_atomic/kunmap_atomic.
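
To make the atomic region concrete: with CONFIG_HIGHPTE the PTE pages
may live in highmem, so mapping one goes through kmap_atomic().  The
following is a sketch of the 32-bit x86 pte_offset_map()/pte_unmap()
pair as it looked in this era (using the KM_PTE0 kmap slot); exact
details vary by architecture and kernel version.

	#ifdef CONFIG_HIGHPTE
	/*
	 * The PTE page may be in highmem: map it with kmap_atomic().
	 * That disables preemption, so the caller stays in atomic
	 * context until the matching pte_unmap()/kunmap_atomic().
	 */
	#define pte_offset_map(dir, address)				\
		((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE0) +	\
		 pte_index((address)))
	#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
	#else
	/* Lowmem page tables are always mapped: no atomic section. */
	#define pte_offset_map(dir, address)				\
		((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
	#define pte_unmap(pte) do { } while (0)
	#endif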

And those kmaps create an atomic region that we cannot do allocations
in.  However, the whole stack expand code needs to do
anon_vma_prepare() and vma_lock_anon_vma(), and neither can run in an
atomic region.
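
In other words, the old ordering could end up sleeping inside an
atomic region.  A condensed before/after sketch of the fault path (the
full change is in the diff below):

	/*
	 * Old ordering (buggy with HIGHPTE): page_table is still
	 * kmapped, so we are atomic while check_stack_guard_page()
	 * potentially expands the stack via anon_vma_prepare(),
	 * which can allocate and sleep.
	 */
	if (check_stack_guard_page(vma, address) < 0) {
		pte_unmap(page_table);
		return VM_FAULT_SIGBUS;
	}

	/*
	 * New ordering: drop the kmap first, then run anything that
	 * may sleep; the zero-page path re-maps the pte later.
	 */
	pte_unmap(page_table);
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGBUS;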

Now, a better model might actually be to do the anon_vma_prepare() when
_creating_ a VM_GROWSDOWN segment, and not have to worry about any of
this at page fault time.  But in the meantime, this is the
straightforward fix for the issue.
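
Purely as an illustration of that alternative model (not what this
patch does, and the call site shown is an assumption), the anon_vma
would be set up once, in process context, when the growable VMA is
created:

	/*
	 * Hypothetical: prepare the anon_vma when a VM_GROWSDOWN vma
	 * is created, so the fault path never has to.
	 */
	if (vma->vm_flags & VM_GROWSDOWN) {
		if (unlikely(anon_vma_prepare(vma)))
			return -ENOMEM;	/* process context: sleeping is fine */
	}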

See https://bugzilla.kernel.org/show_bug.cgi?id=16588 for details.

Reported-by: Wylda <wylda@volny.cz>
Reported-by: Sedat Dilek <sedat.dilek@gmail.com>
Reported-by: Mike Pagano <mpagano@gentoo.org>
Reported-by: François Valenduc <francois.valenduc@tvcablenet.be>
Tested-by: Ed Tomlinson <edt@aei.ca>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Greg KH <gregkh@suse.de>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/memory.c b/mm/memory.c
index 9b3b73f4ae9c77350abd052abbdc889634258801..b6e5fd23cc5a48e13f49b0d824ca3cc9713bd3ab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2792,24 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        pte_t entry;
 
-       if (check_stack_guard_page(vma, address) < 0) {
-               pte_unmap(page_table);
+       pte_unmap(page_table);
+
+       /* Check if we need to add a guard page to the stack */
+       if (check_stack_guard_page(vma, address) < 0)
                return VM_FAULT_SIGBUS;
-       }
 
+       /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
                                                vma->vm_page_prot));
-               ptl = pte_lockptr(mm, pmd);
-               spin_lock(ptl);
+               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
                if (!pte_none(*page_table))
                        goto unlock;
                goto setpte;
        }
 
        /* Allocate our own private page. */
-       pte_unmap(page_table);
-
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
        page = alloc_zeroed_user_highpage_movable(vma, address);
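
Note how the zero-page path switches from an open-coded
pte_lockptr()+spin_lock() to pte_offset_map_lock(), which re-maps the
PTE page (undoing the early pte_unmap()) and takes the PTE lock in one
step.  Its generic definition at the time was essentially the
following sketch (from include/linux/mm.h of that era):

	#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
	({							\
		spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
		pte_t *__pte = pte_offset_map(pmd, address);	\
		*(ptlp) = __ptl;				\
		spin_lock(__ptl);				\
		__pte;						\
	})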