From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Thu, 29 Jun 2017 08:53:15 -0700
Subject: [PATCH] x86/mm: Give each mm TLB flush generation a unique ID
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

This adds two new variables to mmu_context_t: ctx_id and tlb_gen.
ctx_id uniquely identifies the mm_struct and will never be reused.
For a given mm_struct (and hence ctx_id), tlb_gen is a monotonic
count of the number of times that a TLB flush has been requested.
The pair (ctx_id, tlb_gen) can be used as an identifier for TLB
flush actions and will be used in subsequent patches to reliably
determine whether all needed TLB flushes have occurred on a given
CPU.

This patch is split out for ease of review.  By itself, it has no
real effect other than creating and updating the new variables.
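
For illustration, the consumer side that later patches add looks
roughly like this (local_ctx_id and local_tlb_gen stand for per-CPU
bookkeeping that does not exist yet as of this patch; the names are
placeholders, not code introduced below):

	/* Hypothetical per-CPU staleness check for one mm: */
	if (local_ctx_id == mm->context.ctx_id &&
	    local_tlb_gen == atomic64_read(&mm->context.tlb_gen)) {
		/* Every flush requested for this mm has happened here. */
	} else {
		/* Stale: flush, then record the new (ctx_id, tlb_gen). */
	}
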
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Nadav Amit <nadav.amit@gmail.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/413a91c24dab3ed0caa5f4e4d017d87b0857f920.1498751203.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit f39681ed0f48498b80455095376f11535feea332)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit e566a0dfbb2a5f7ea90dd66ce384740372739e14)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/mmu.h         | 25 +++++++++++++++++++++++--
 arch/x86/include/asm/mmu_context.h |  6 ++++++
 arch/x86/include/asm/tlbflush.h    | 18 ++++++++++++++++++
 arch/x86/mm/tlb.c                  |  6 ++++--
 4 files changed, 51 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 79b647a7ebd0..bb8c597c2248 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -3,12 +3,28 @@
 
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/atomic.h>
 
 /*
- * The x86 doesn't have a mmu context, but
- * we put the segment information here.
+ * x86 has arch-specific MMU state beyond what lives in mm_struct.
  */
 typedef struct {
+	/*
+	 * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
+	 * be reused, and zero is not a valid ctx_id.
+	 */
+	u64 ctx_id;
+
+	/*
+	 * Any code that needs to do any sort of TLB flushing for this
+	 * mm will first make its changes to the page tables, then
+	 * increment tlb_gen, then flush.  This lets the low-level
+	 * flushing code keep track of what needs flushing.
+	 *
+	 * This is not used on Xen PV.
+	 */
+	atomic64_t tlb_gen;
+
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct ldt_struct *ldt;
 #endif
@@ -37,6 +53,11 @@ typedef struct {
 #endif
 } mm_context_t;
 
+#define INIT_MM_CONTEXT(mm)						\
+	.context = {							\
+		.ctx_id = 1,						\
+	}
+
 void leave_mm(int cpu);
 
 #endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 7a234be7e298..6c05679c715b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -12,6 +12,9 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/mpx.h>
+
+extern atomic64_t last_mm_ctx_id;
+
 #ifndef CONFIG_PARAVIRT
 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
@@ -132,6 +135,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+	atomic64_set(&mm->context.tlb_gen, 0);
+
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
 		/* pkey 0 is the default and always allocated */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 2b3d68093235..f1f2e73b7b77 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -57,6 +57,23 @@ static inline void invpcid_flush_all_nonglobals(void)
 	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
 }
 
+static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
+{
+	u64 new_tlb_gen;
+
+	/*
+	 * Bump the generation count.  This also serves as a full barrier
+	 * that synchronizes with switch_mm(): callers are required to order
+	 * their read of mm_cpumask after their writes to the paging
+	 * structures.
+	 */
+	smp_mb__before_atomic();
+	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
+	smp_mb__after_atomic();
+
+	return new_tlb_gen;
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -270,6 +287,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 					struct mm_struct *mm)
 {
+	inc_mm_tlb_gen(mm);
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
 }
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 014d07a80053..14f4f8f66aa8 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,8 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+
 void leave_mm(int cpu)
 {
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
@@ -250,8 +252,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 
 	cpu = get_cpu();
 
-	/* Synchronize with switch_mm. */
-	smp_mb();
+	/* This is also a barrier that synchronizes with switch_mm(). */
+	inc_mm_tlb_gen(mm);
 
 	/* Should we flush just the requested range? */
 	if ((end != TLB_FLUSH_ALL) &&
-- 
2.14.2
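
Appendix (below the mail signature, so the patch above is unaffected):
the tlb_gen protocol -- bump the generation after page-table changes,
compare generations before deciding whether to flush -- can be sketched
in user-space C11.  Everything here is a hypothetical analogy, not
kernel code; atomic_ullong stands in for atomic64_t, and seq_cst
atomics play the role of the smp_mb__* barriers.

/* Build with: cc -std=c11 -o gen_sketch gen_sketch.c */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ullong tlb_gen;		/* per-"mm" flush generation */
static unsigned long long local_tlb_gen; /* per-"CPU": last gen flushed */

/* Analogous to inc_mm_tlb_gen(): callers change the "page tables"
 * first, then bump the generation and return the new value. */
static unsigned long long inc_tlb_gen(void)
{
	return atomic_fetch_add(&tlb_gen, 1) + 1;
}

/* A "CPU" is up to date iff it has flushed through the latest gen. */
static void maybe_flush(void)
{
	unsigned long long gen = atomic_load(&tlb_gen);

	if (local_tlb_gen == gen) {
		printf("gen %llu: no flush needed\n", gen);
	} else {
		printf("flush: catching up %llu -> %llu\n",
		       local_tlb_gen, gen);
		local_tlb_gen = gen;	/* record that we caught up */
	}
}

int main(void)
{
	maybe_flush();	/* up to date: no flush needed */
	inc_tlb_gen();	/* someone requests a "TLB flush" */
	maybe_flush();	/* behind: flushes 0 -> 1 */
	return 0;
}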