From 19be0eaffa3ac7d8eb6784ad9bdbc7d67ed8e619 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Thu, 13 Oct 2016 13:07:36 -0700
Subject: mm: remove gup_flags FOLL_WRITE games from __get_user_pages()

This is an ancient bug that was actually attempted to be fixed once
(badly) by me eleven years ago in commit 4ceb5db9757a ("Fix
get_user_pages() race for write access") but that was then undone due to
problems on s390 by commit f33ea7f404e5 ("fix get_user_pages bug").

In the meantime, the s390 situation has long been fixed, and we can now
fix it by checking the pte_dirty() bit properly (and do it better). The
s390 dirty bit was implemented in abf09bed3cce ("s390/mm: implement
software dirty bits") which made it into v3.9. Earlier kernels will
have to look at the page state itself.

Also, the VM has become more scalable, and what used to be a purely
theoretical race back then has become easier to trigger.

To fix it, we introduce a new internal FOLL_COW flag to mark the "yes,
we already did a COW" rather than play racy games with FOLL_WRITE that
is very fundamental, and then use the pte dirty flag to validate that
the FOLL_COW flag is still valid.

Reported-and-tested-by: Phil "not Paul" Oester <kernel@linuxace.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Willy Tarreau <w@1wt.eu>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 include/linux/mm.h |  1 +
 mm/memory.c        | 15 ++++++++++++--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index e9caec6..ed85879 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1971,6 +1971,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
 #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
 #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
+#define FOLL_COW	0x4000	/* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
diff --git a/mm/memory.c b/mm/memory.c
index fce5131..f62b9f7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1467,6 +1467,17 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
+
 /**
  * follow_page_mask - look up a page descriptor from a user-virtual address
  * @vma: vm_area_struct mapping @address
@@ -1588,7 +1599,7 @@ split_fallthrough:
 	}
 	if ((flags & FOLL_NUMA) && pte_numa(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !pte_write(pte))
+	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
 		goto unlock;
 
 	page = vm_normal_page(vma, address, pte);
@@ -1900,7 +1911,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				 */
 				if ((ret & VM_FAULT_WRITE) &&
 				    !(vma->vm_flags & VM_WRITE))
-					foll_flags &= ~FOLL_WRITE;
+					foll_flags |= FOLL_COW;
 
 				cond_resched();
 			}
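
For readers who want to exercise the guarded code path from userspace, here is
a minimal sketch (illustration only, not part of the patch): writing through
/proc/self/mem to a read-only MAP_PRIVATE mapping reaches __get_user_pages()
with FOLL_WRITE|FOLL_FORCE, i.e. exactly the COW cycle that FOLL_COW now
tracks. On a fixed kernel the write lands in a private copy and the backing
file is unchanged; the file name is arbitrary and error handling is minimal.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		/* Map any readable file privately and read-only. */
		int fd = open("/etc/hostname", O_RDONLY);
		char *map = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
		if (fd < 0 || map == MAP_FAILED)
			return 1;

		/*
		 * Writing via /proc/self/mem takes the FOLL_FORCE path this
		 * patch hardens: the kernel must break COW and dirty the
		 * private copy rather than touch the page cache page.
		 */
		int mem = open("/proc/self/mem", O_RDWR);
		if (mem < 0 || pwrite(mem, "x", 1, (off_t)(unsigned long)map) != 1)
			return 1;

		/* The mapping sees the write; the file on disk must not. */
		printf("mapping now starts with: %c\n", map[0]);
		return 0;
	}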