int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSUP) &&
- (vma->vm_end == addr) &&
- !vma_growsup(vma->vm_next, addr);
-}
-
int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
pgoff_t offset,
unsigned long size);
+extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+extern int stack_guard_area(struct vm_area_struct *vma, unsigned long address);
/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
- unsigned long address);
+ unsigned long address, unsigned long gap);
+unsigned long expandable_stack_area(struct vm_area_struct *vma,
+ unsigned long address, unsigned long *gap);
+
#if VM_GROWSUP
-extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+extern int expand_upwards(struct vm_area_struct *vma,
+ unsigned long address, unsigned long gap);
#else
- #define expand_upwards(vma, address) (0)
+ #define expand_upwards(vma, address, gap) (0)
#endif
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
return ret;
}
-/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
- address &= PAGE_MASK;
- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
- struct vm_area_struct *prev = vma->vm_prev;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split..
- */
- if (prev && prev->vm_end == address)
- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
- return expand_downwards(vma, address - PAGE_SIZE);
- }
- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
- struct vm_area_struct *next = vma->vm_next;
-
- /* As VM_GROWSDOWN but s/below/above/ */
- if (next && next->vm_start == address + PAGE_SIZE)
- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
- return expand_upwards(vma, address + PAGE_SIZE);
- }
- return 0;
-}
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
return VM_FAULT_SIGBUS;
/* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, vmf->address) < 0)
+ if ((vma->vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) &&
+ expand_stack(vma, vmf->address) < 0)
return VM_FAULT_SIGSEGV;
/*
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow,
+ unsigned long gap)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
/* Stack limit test */
actual_size = size;
if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= PAGE_SIZE;
+ actual_size -= gap;
if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
-int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned long gap)
{
struct mm_struct *mm = vma->vm_mm;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /* Guard against wrapping around to address 0. */
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else
- return -ENOMEM;
-
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
error = -ENOMEM;
if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
- error = acct_stack_growth(vma, size, grow);
+ error = acct_stack_growth(vma, size, grow, gap);
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
* vma is the first one with address < vma->vm_start. Have to extend vma.
*/
int expand_downwards(struct vm_area_struct *vma,
- unsigned long address)
+ unsigned long address, unsigned long gap)
{
struct mm_struct *mm = vma->vm_mm;
int error;
error = -ENOMEM;
if (grow <= vma->vm_pgoff) {
- error = acct_stack_growth(vma, size, grow);
+ error = acct_stack_growth(vma, size, grow, gap);
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
return error;
}
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
/*
* Note how expand_stack() refuses to expand the stack all the way to
* abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
+ * a stack mapping. We want to leave room for a guard area, after all
* (the guard page itself is not added here, that is done by the
* actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
*/
#ifdef CONFIG_STACK_GROWSUP
+/*
+ * Work out how far a VM_GROWSUP stack vma may expand above @address while
+ * keeping stack_guard_gap free between it and the next mapping.  Returns
+ * the page-aligned address the stack may grow to, or -ENOMEM when the
+ * next (non-stack) mapping is closer than the required gap.  The gap
+ * actually enforced is returned through @gap.
+ */
+unsigned long expandable_stack_area(struct vm_area_struct *vma,
+		unsigned long address, unsigned long *gap)
+{
+	struct vm_area_struct *next = vma->vm_next;
+	unsigned long guard_gap = stack_guard_gap;
+	unsigned long guard_addr;
+
+	address = ALIGN(address, PAGE_SIZE);
+	if (!next)
+		goto out;
+
+	if (next->vm_flags & VM_GROWSUP) {
+		/* Adjacent grow-up stack (e.g. split): accept a smaller gap. */
+		guard_gap = min(guard_gap, next->vm_start - address);
+		goto out;
+	}
+
+	if (next->vm_start - address < guard_gap)
+		return -ENOMEM;
+out:
+	/* Clamp so the guard area never extends beyond TASK_SIZE. */
+	if (TASK_SIZE - address < guard_gap)
+		guard_gap = TASK_SIZE - address;
+	guard_addr = address + guard_gap;
+	*gap = guard_gap;
+
+	return guard_addr;
+}
+
+/*
+ * Grow an upwards stack so it covers @address plus the enforced guard
+ * gap computed by expandable_stack_area(); -ENOMEM if no room.
+ */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	unsigned long gap;
+
+	address = expandable_stack_area(vma, address, &gap);
+	if (IS_ERR_VALUE(address))
+		return -ENOMEM;
+	return expand_upwards(vma, address, gap);
+}
+
+/*
+ * Does @address fall within the guard area of this grow-up stack vma?
+ * Returns 0 for non-stack vmas, or when the next vma is itself a
+ * grow-up stack (the gap between disjoint stacks is ignored here).
+ */
+int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
{
struct vm_area_struct *next;
-	address &= PAGE_MASK;
+	if (!(vma->vm_flags & VM_GROWSUP))
+		return 0;
+
+	/*
+	 * strictly speaking there is a guard gap between disjoint stacks
+	 * but the gap is not canonical (it might be smaller) and it is
+	 * reasonably safe to assume that we can ignore that gap for stack
+	 * POPULATE or /proc/<pid>[s]maps purposes
+	 */
next = vma->vm_next;
-	if (next && next->vm_start == address + PAGE_SIZE) {
-		if (!(next->vm_flags & VM_GROWSUP))
-			return -ENOMEM;
-	}
-	return expand_upwards(vma, address);
+	if (next && next->vm_flags & VM_GROWSUP)
+		return 0;
+
+	/* true when address is within one guard gap of the stack top */
+	return vma->vm_end - address <= stack_guard_gap;
}
struct vm_area_struct *
return prev;
}
#else
+/*
+ * Work out how far a VM_GROWSDOWN stack vma may expand below @address
+ * while keeping stack_guard_gap free between it and the previous
+ * mapping.  Returns the page-aligned address the stack may grow down
+ * to, or -ENOMEM when the previous (non-stack) mapping or mmap_min_addr
+ * is closer than the required gap.  The gap actually enforced is
+ * returned through @gap.
+ */
+unsigned long expandable_stack_area(struct vm_area_struct *vma,
+		unsigned long address, unsigned long *gap)
+{
+	struct vm_area_struct *prev = vma->vm_prev;
+	unsigned long guard_gap = stack_guard_gap;
+	unsigned long guard_addr;
+
+	address &= PAGE_MASK;
+	if (!prev)
+		goto out;
+
+	/*
+	 * Is there a mapping abutting this one below?
+	 *
+	 * That's only ok if it's the same stack mapping
+	 * that has gotten split or there is sufficient gap
+	 * between mappings
+	 */
+	if (prev->vm_flags & VM_GROWSDOWN) {
+		guard_gap = min(guard_gap, address - prev->vm_end);
+		goto out;
+	}
+
+	if (address - prev->vm_end < guard_gap)
+		return -ENOMEM;
+
+out:
+	/* make sure we won't underflow */
+	if (address < mmap_min_addr)
+		return -ENOMEM;
+	if (address - mmap_min_addr < guard_gap)
+		guard_gap = address - mmap_min_addr;
+
+	guard_addr = address - guard_gap;
+	*gap = guard_gap;
+
+	return guard_addr;
+}
+
+/*
+ * Grow a downwards stack so it covers @address plus the enforced guard
+ * gap computed by expandable_stack_area(); -ENOMEM if no room.
+ */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	unsigned long gap;
+
+	address = expandable_stack_area(vma, address, &gap);
+	if (IS_ERR_VALUE(address))
+		return -ENOMEM;
+	return expand_downwards(vma, address, gap);
+}
+
+/*
+ * Does @address fall within the guard area of this grow-down stack vma?
+ * Returns 0 for non-stack vmas, or when the previous vma is itself a
+ * grow-down stack (the gap between disjoint stacks is ignored here).
+ */
+int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
{
struct vm_area_struct *prev;
-	address &= PAGE_MASK;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		return 0;
+
+	/*
+	 * strictly speaking there is a guard gap between disjoint stacks
+	 * but the gap is not canonical (it might be smaller) and it is
+	 * reasonably safe to assume that we can ignore that gap for stack
+	 * POPULATE or /proc/<pid>[s]maps purposes
+	 */
prev = vma->vm_prev;
-	if (prev && prev->vm_end == address) {
-		if (!(prev->vm_flags & VM_GROWSDOWN))
-			return -ENOMEM;
-	}
-	return expand_downwards(vma, address);
+	if (prev && prev->vm_flags & VM_GROWSDOWN)
+		return 0;
+
+	/* true when address is within one guard gap of the stack bottom */
+	return address - vma->vm_start < stack_guard_gap;
}
struct vm_area_struct *