From c84a1d7630152f64ebe07519e757de5c1cf70808 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen@linux.intel.com>
Date: Mon, 4 Dec 2017 15:07:57 +0100
Subject: [PATCH 207/241] x86/mm: Allow flushing for future ASID switches
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

If the page tables are changed in a way that requires invalidating all
contexts (aka. PCIDs / ASIDs), they can be actively invalidated by:

1. INVPCID for each PCID (works for single pages too); see the sketch
   after this list.

2. Load CR3 with each PCID without the NOFLUSH bit set.

3. Load CR3 with the NOFLUSH bit set for each and do INVLPG for each address.

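For a sense of the cost, a hypothetical sketch of option 1 follows. It is
not part of this patch: flush_all_asids_actively() is an invented name,
and it assumes the existing invpcid_flush_single_context() helper plus a
"kernel PCID = asid + 1" mapping for the dynamic ASIDs in this series.

	/*
	 * Hypothetical sketch: actively invalidate every dynamic ASID
	 * with one INVPCID each.  This would have to run on every CPU
	 * and walk ~6 contexts (12 once kernel and user page tables
	 * are split), which is what the deferred scheme below avoids.
	 */
	static void flush_all_asids_actively(void)
	{
		u16 asid;

		for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++)
			invpcid_flush_single_context(asid + 1);
	}
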
But none of these are really feasible, since there are ~6 ASIDs (12 with
PAGE_TABLE_ISOLATION, which gives each ASID both a kernel and a user
variant) at the time that invalidation is required.
Instead of actively invalidating them, invalidate only the *current*
context and _quickly_ mark cpu_tlbstate to indicate that the other
contexts must be invalidated in the future.

At the next context switch, look for this indicator ('invalidate_other'
being set) and invalidate all of the cpu_tlbstate.ctxs[] entries.

This ensures that any future context switches will do a full flush
of the TLB, picking up the previous changes.

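Condensed, the scheme added by this patch amounts to the following
simplified sketch (feature checks and the WARN elided; see the diff
below for the actual code):

	/* Flush path: defer invalidation of all other contexts. */
	this_cpu_write(cpu_tlbstate.invalidate_other, true);

	/*
	 * Next context switch, in choose_new_asid(): zap the ctx_id of
	 * every non-loaded ASID so that switching to it forces a full
	 * TLB flush, then clear the deferred-work flag.
	 */
	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
			continue;
		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
	}
	this_cpu_write(cpu_tlbstate.invalidate_other, false);
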
[ tglx: Folded more fixups from Peter ]

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 2ea907c4fe7b78e5840c1dc07800eae93248cad1)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit fbb7e6e9e7e7cedecc164d660d08563f88103b56)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/tlbflush.h | 37 +++++++++++++++++++++++++++++--------
 arch/x86/mm/tlb.c               | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 503f87c30c15..3769ce182eac 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -124,6 +124,17 @@ struct tlb_state {
 	 */
 	bool is_lazy;
 
+	/*
+	 * If set we changed the page tables in such a way that we
+	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
+	 * This tells us to go invalidate all the non-loaded ctxs[]
+	 * on the next context switch.
+	 *
+	 * The current ctx was kept up-to-date as it ran and does not
+	 * need to be invalidated.
+	 */
+	bool invalidate_other;
+
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
 	 * disabling interrupts when modifying either one.
@@ -201,6 +212,14 @@ static inline unsigned long cr4_read_shadow(void)
 	return this_cpu_read(cpu_tlbstate.cr4);
 }
 
+/*
+ * Mark all other ASIDs as invalid, preserves the current.
+ */
+static inline void invalidate_other_asid(void)
+{
+	this_cpu_write(cpu_tlbstate.invalidate_other, true);
+}
+
 /*
  * Save some of cr4 feature set we're using (e.g. Pentium 4MB
  * enable and PPro Global page enable), so that any CPU's that boot
@@ -287,14 +306,6 @@ static inline void __flush_tlb_all(void)
 		 */
 		__flush_tlb();
 	}
-
-	/*
-	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
-	 * we'd end up flushing kernel translations for the current ASID but
-	 * we might fail to flush kernel translations for other cached ASIDs.
-	 *
-	 * To avoid this issue, we force PCID off if PGE is off.
-	 */
 }
 
 /*
@@ -304,6 +315,16 @@ static inline void __flush_tlb_one(unsigned long addr)
 {
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	/*
+	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
+	 * but since kernel space is replicated across all, we must also
+	 * invalidate all others.
+	 */
+	invalidate_other_asid();
 }
 
 #define TLB_FLUSH_ALL	-1UL
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 87d4f961bcb4..ce87b69fb4e0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,38 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+/*
+ * We get here when we do something requiring a TLB invalidation
+ * but could not go invalidate all of the contexts.  We do the
+ * necessary invalidation by clearing out the 'ctx_id' which
+ * forces a TLB flush when the context is loaded.
+ */
+void clear_asid_other(void)
+{
+	u16 asid;
+
+	/*
+	 * This is only expected to be set if we have disabled
+	 * kernel _PAGE_GLOBAL pages.
+	 */
+	if (!static_cpu_has(X86_FEATURE_PTI)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
+		/* Do not need to flush the current asid */
+		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
+			continue;
+		/*
+		 * Make sure the next time we go to switch to
+		 * this asid, we do a flush:
+		 */
+		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
+	}
+	this_cpu_write(cpu_tlbstate.invalidate_other, false);
+}
+
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
 DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
@@ -43,6 +75,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 		return;
 	}
 
+	if (this_cpu_read(cpu_tlbstate.invalidate_other))
+		clear_asid_other();
+
 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
 		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
 		    next->context.ctx_id)
-- 
2.14.2
