From 0a0e88a03210365fb82b7ab9cebfccca31aff608 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= <f.gruenbichler@proxmox.com>
Date: Fri, 23 Jun 2017 08:25:20 +0200
Subject: [PATCH 1/4] Revert "mm: enlarge stack guard gap"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit fe388e5751e74b3534ee21d01b999795dfc83d39.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 include/linux/mm.h   |  40 +++++++++++---
 arch/ia64/mm/fault.c |   2 +-
 fs/exec.c            |   8 +--
 fs/proc/task_mmu.c   |  11 ++--
 mm/gup.c             |   4 +-
 mm/memory.c          |  35 +++++++++++-
 mm/mmap.c            | 152 ++++++++++-----------------------------------
 7 files changed, 102 insertions(+), 150 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fbe65ceafb94..3978a350e9e4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1366,11 +1366,39 @@ int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
 {
 	return !vma->vm_ops;
 }
 
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+					     unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+					   unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSUP) &&
+		(vma->vm_end == addr) &&
+		!vma_growsup(vma->vm_next, addr);
+}
+
 int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2111,22 +2139,16 @@ void page_cache_async_readahead(struct address_space *mapping,
 				pgoff_t offset,
 				unsigned long size);
 
-extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-extern int stack_guard_area(struct vm_area_struct *vma, unsigned long address);
 
 /* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
 extern int expand_downwards(struct vm_area_struct *vma,
-		unsigned long address, unsigned long gap);
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
-		unsigned long address, unsigned long *gap);
-
+		unsigned long address);
 #if VM_GROWSUP
-extern int expand_upwards(struct vm_area_struct *vma,
-		unsigned long address, unsigned long gap);
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
-  #define expand_upwards(vma, address, gap) (0)
+  #define expand_upwards(vma, address) (0)
 #endif
 
 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d5caa3cab925..fa6ad95e992e 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -224,7 +224,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 		 */
 		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
 			goto bad_area;
-		if (expand_upwards(vma, address, 0))
+		if (expand_upwards(vma, address))
 			goto bad_area;
 	}
 	goto good_area;
diff --git a/fs/exec.c b/fs/exec.c
index 5b6383208379..1825e64f8bf3 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -205,7 +205,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 #ifdef CONFIG_STACK_GROWSUP
 	if (write) {
-		ret = expand_downwards(bprm->vma, pos, 0);
+		ret = expand_downwards(bprm->vma, pos);
 		if (ret < 0)
 			return NULL;
 	}
@@ -227,12 +227,6 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
 		struct rlimit *rlim;
 
-		/*
-		 * GRWOSUP doesn't really have any gap at this stage because we grow
-		 * the stack down now. See the expand_downwards above.
-		 */
-		if (!IS_ENABLED(CONFIG_STACK_GROWSUP))
-			size -= stack_guard_gap;
 		acct_arg_size(bprm, size / PAGE_SIZE);
 
 		/*
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 75f9099e2ecf..52049b595b0f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -302,14 +302,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
+	if (stack_guard_page_start(vma, start))
+		start += PAGE_SIZE;
 	end = vma->vm_end;
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		if (stack_guard_area(vma, start))
-			start += stack_guard_gap;
-	} else if (vma->vm_flags & VM_GROWSUP) {
-		if (stack_guard_area(vma, end))
-			end -= stack_guard_gap;
-	}
+	if (stack_guard_page_end(vma, end))
+		end -= PAGE_SIZE;
 
 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/mm/gup.c b/mm/gup.c
index 90252d1038b9..bb5f3d69f87e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -372,7 +372,9 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 		return -ENOENT;
 	/* For mm_populate(), just skip the stack guard page. */
-	if ((*flags & FOLL_POPULATE) && stack_guard_area(vma, address))
+	if ((*flags & FOLL_POPULATE) &&
+	    (stack_guard_page_start(vma, address) ||
+	     stack_guard_page_end(vma, address + PAGE_SIZE)))
 		return -ENOENT;
 	if (*flags & FOLL_WRITE)
 		fault_flags |= FAULT_FLAG_WRITE;
diff --git a/mm/memory.c b/mm/memory.c
index fca9dc75a04d..c89214451507 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2714,7 +2714,39 @@ int do_swap_page(struct vm_fault *vmf)
 	return ret;
 }
 
+/*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+	address &= PAGE_MASK;
+	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+		struct vm_area_struct *prev = vma->vm_prev;
+
+		/*
+		 * Is there a mapping abutting this one below?
+		 *
+		 * That's only ok if it's the same stack mapping
+		 * that has gotten split..
+		 */
+		if (prev && prev->vm_end == address)
+			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
+		return expand_downwards(vma, address - PAGE_SIZE);
+	}
+	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+		struct vm_area_struct *next = vma->vm_next;
+
+		/* As VM_GROWSDOWN but s/below/above/ */
+		if (next && next->vm_start == address + PAGE_SIZE)
+			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+		return expand_upwards(vma, address + PAGE_SIZE);
+	}
+	return 0;
+}
 
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
@@ -2733,8 +2765,7 @@ static int do_anonymous_page(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	/* Check if we need to add a guard page to the stack */
-	if ((vma->vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) &&
-	    expand_stack(vma, vmf->address) < 0)
+	if (check_stack_guard_page(vma, vmf->address) < 0)
 		return VM_FAULT_SIGSEGV;
 
 	/*
diff --git a/mm/mmap.c b/mm/mmap.c
index 2a3bdf11baf0..4b3a2aaa18e9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2151,8 +2151,7 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow,
-		unsigned long gap)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
@@ -2165,7 +2164,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow,
 	/* Stack limit test */
 	actual_size = size;
 	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= gap;
+		actual_size -= PAGE_SIZE;
 	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
@@ -2201,7 +2200,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow,
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end. Have to extend vma.
 */
-int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned long gap)
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int error = 0;
@@ -2209,6 +2208,12 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned long gap)
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
+	/* Guard against wrapping around to address 0. */
+	if (address < PAGE_ALIGN(address+4))
+		address = PAGE_ALIGN(address+4);
+	else
+		return -ENOMEM;
+
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
@@ -2229,7 +2234,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned long gap)
 
 	error = -ENOMEM;
 	if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
-		error = acct_stack_growth(vma, size, grow, gap);
+		error = acct_stack_growth(vma, size, grow);
 		if (!error) {
 			/*
 			 * vma_gap_update() doesn't support concurrent
@@ -2270,7 +2275,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned long gap)
  * vma is the first one with address < vma->vm_start. Have to extend vma.
 */
 int expand_downwards(struct vm_area_struct *vma,
-				   unsigned long address, unsigned long gap)
+				   unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int error;
@@ -2300,7 +2305,7 @@ int expand_downwards(struct vm_area_struct *vma,
 
 	error = -ENOMEM;
 	if (grow <= vma->vm_pgoff) {
-		error = acct_stack_growth(vma, size, grow, gap);
+		error = acct_stack_growth(vma, size, grow);
 		if (!error) {
 			/*
 			 * vma_gap_update() doesn't support concurrent
@@ -2334,72 +2339,29 @@ int expand_downwards(struct vm_area_struct *vma,
 	return error;
 }
 
-/* enforced gap between the expanding stack and other mappings. */
-unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
-
 /*
  * Note how expand_stack() refuses to expand the stack all the way to
  * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard area, after all
+ * a stack mapping. We want to leave room for a guard page, after all
  * (the guard page itself is not added here, that is done by the
  * actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
 */
 #ifdef CONFIG_STACK_GROWSUP
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
-		unsigned long address, unsigned long *gap)
-{
-	struct vm_area_struct *next = vma->vm_next;
-	unsigned long guard_gap = stack_guard_gap;
-	unsigned long guard_addr;
-
-	address = ALIGN(address, PAGE_SIZE);;
-	if (!next)
-		goto out;
-
-	if (next->vm_flags & VM_GROWSUP) {
-		guard_gap = min(guard_gap, next->vm_start - address);
-		goto out;
-	}
-
-	if (next->vm_start - address < guard_gap)
-		return -ENOMEM;
-out:
-	if (TASK_SIZE - address < guard_gap)
-		guard_gap = TASK_SIZE - address;
-	guard_addr = address + guard_gap;
-	*gap = guard_gap;
-
-	return guard_addr;
-}
-
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	unsigned long gap;
-
-	address = expandable_stack_area(vma, address, &gap);
-	if (IS_ERR_VALUE(address))
-		return -ENOMEM;
-	return expand_upwards(vma, address, gap);
-}
-
-int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
-{
 	struct vm_area_struct *next;
 
-	if (!(vma->vm_flags & VM_GROWSUP))
-		return 0;
-
-	/*
-	 * strictly speaking there is a guard gap between disjoint stacks
-	 * but the gap is not canonical (it might be smaller) and it is
-	 * reasonably safe to assume that we can ignore that gap for stack
-	 * POPULATE or /proc/<pid>[s]maps purposes
-	 */
+	address &= PAGE_MASK;
 	next = vma->vm_next;
-	if (next && next->vm_flags & VM_GROWSUP)
-		return 0;
-
-	return vma->vm_end - address <= stack_guard_gap;
+	if (next && next->vm_start == address + PAGE_SIZE) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+	}
+	return expand_upwards(vma, address);
 }
 
 struct vm_area_struct *
@@ -2418,73 +2380,17 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 	return prev;
 }
 #else
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
-		unsigned long address, unsigned long *gap)
-{
-	struct vm_area_struct *prev = vma->vm_prev;
-	unsigned long guard_gap = stack_guard_gap;
-	unsigned long guard_addr;
-
-	address &= PAGE_MASK;
-	if (!prev)
-		goto out;
-
-	/*
-	 * Is there a mapping abutting this one below?
-	 *
-	 * That's only ok if it's the same stack mapping
-	 * that has gotten split or there is sufficient gap
-	 * between mappings
-	 */
-	if (prev->vm_flags & VM_GROWSDOWN) {
-		guard_gap = min(guard_gap, address - prev->vm_end);
-		goto out;
-	}
-
-	if (address - prev->vm_end < guard_gap)
-		return -ENOMEM;
-
-out:
-	/* make sure we won't underflow */
-	if (address < mmap_min_addr)
-		return -ENOMEM;
-	if (address - mmap_min_addr < guard_gap)
-		guard_gap = address - mmap_min_addr;
-
-	guard_addr = address - guard_gap;
-	*gap = guard_gap;
-
-	return guard_addr;
-}
-
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	unsigned long gap;
-
-	address = expandable_stack_area(vma, address, &gap);
-	if (IS_ERR_VALUE(address))
-		return -ENOMEM;
-	return expand_downwards(vma, address, gap);
-}
-
-int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
-{
 	struct vm_area_struct *prev;
 
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		return 0;
-
-	/*
-	 * strictly speaking there is a guard gap between disjoint stacks
-	 * but the gap is not canonical (it might be smaller) and it is
-	 * reasonably safe to assume that we can ignore that gap for stack
-	 * POPULATE or /proc/<pid>[s]maps purposes
-	 */
+	address &= PAGE_MASK;
 	prev = vma->vm_prev;
-	if (prev && prev->vm_flags & VM_GROWSDOWN)
-		return 0;
-
-	return address - vma->vm_start < stack_guard_gap;
+	if (prev && prev->vm_end == address) {
+		if (!(prev->vm_flags & VM_GROWSDOWN))
+			return -ENOMEM;
+	}
+	return expand_downwards(vma, address);
 }
 
 struct vm_area_struct *
-- 
2.11.0