From a1ccda197e7a758c8e9b7be299e9beaf3ca3ed51 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 4 Dec 2017 15:07:49 +0100
Subject: [PATCH 202/231] x86/cpu_entry_area: Add debugstore entries to
 cpu_entry_area
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

The Intel PEBS/BTS debug store is a design trainwreck as it expects virtual
addresses which must be visible in any execution context.

So it is required to make these mappings visible to user space when kernel
page table isolation is active.

Provide enough room for the buffer mappings in the cpu_entry_area so the
buffers are available in the user-space-visible page tables.

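For scale: with 4K pages (the x86 PAGE_SIZE), the per-CPU reservation
added below works out to

  BTS_BUFFER_SIZE  = PAGE_SIZE << 4 = 64K
  PEBS_BUFFER_SIZE = PAGE_SIZE << 4 = 64K
  sizeof(struct debug_store_buffers) = 128K (32 non-present PTEs)
  sizeof(struct debug_store)         = 4K   (padded to one full page)
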
At the point where the kernel side entry area is populated there is no
buffer available yet, but the kernel PMD must be populated. To achieve this,
set the entries for these buffers to non-present.

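As an editorial sketch only (not part of this patch): once a later
change allocates the actual buffers, the non-present entries reserved
here can be flipped to real mappings with the same cea_set_pte()
helper that percpu_setup_debug_store() uses below. The helper name
ds_update_cea() is hypothetical:

  static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
  {
          phys_addr_t pa = virt_to_phys(addr);
          size_t msz = 0;

          /* Replace the PAGE_NONE reservations page by page. */
          for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
                  cea_set_pte(cea, pa, prot);
  }
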
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 10043e02db7f8a4161f76434931051e7d797a5f6)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 4b9996f9c2d35d23a9fa2afe4f161402e6f28309)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/events/perf_event.h          | 21 ++------------------
 arch/x86/include/asm/cpu_entry_area.h | 13 +++++++++++++
 arch/x86/include/asm/intel_ds.h       | 36 +++++++++++++++++++++++++++++++++++
 arch/x86/events/intel/ds.c            |  5 +++--
 arch/x86/mm/cpu_entry_area.c          | 27 ++++++++++++++++++++++++++
 5 files changed, 81 insertions(+), 21 deletions(-)
 create mode 100644 arch/x86/include/asm/intel_ds.h

diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 590eaf7c2c3e..308bc14f58af 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -14,6 +14,8 @@
 
 #include <linux/perf_event.h>
 
+#include <asm/intel_ds.h>
+
 /* To enable MSR tracing please use the generic trace points. */
 
 /*
@@ -77,8 +79,6 @@ struct amd_nb {
 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS		8
 #define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
 
 /*
@@ -95,23 +95,6 @@ struct amd_nb {
 	PERF_SAMPLE_TRANSACTION | \
 	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
 
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
-	u64	bts_buffer_base;
-	u64	bts_index;
-	u64	bts_absolute_maximum;
-	u64	bts_interrupt_threshold;
-	u64	pebs_buffer_base;
-	u64	pebs_index;
-	u64	pebs_absolute_maximum;
-	u64	pebs_interrupt_threshold;
-	u64	pebs_event_reset[MAX_PEBS_EVENTS];
-};
-
 #define PEBS_REGS \
 	(PERF_REG_X86_AX | \
 	 PERF_REG_X86_BX | \
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 2fbc69a0916e..4a7884b8dca5 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -5,6 +5,7 @@
 
 #include <linux/percpu-defs.h>
 #include <asm/processor.h>
+#include <asm/intel_ds.h>
 
 /*
  * cpu_entry_area is a percpu region that contains things needed by the CPU
@@ -40,6 +41,18 @@ struct cpu_entry_area {
 	 */
 	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
 #endif
+#ifdef CONFIG_CPU_SUP_INTEL
+	/*
+	 * Per CPU debug store for Intel performance monitoring. Wastes a
+	 * full page at the moment.
+	 */
+	struct debug_store cpu_debug_store;
+	/*
+	 * The actual PEBS/BTS buffers must be mapped to user space
+	 * Reserve enough fixmap PTEs.
+	 */
+	struct debug_store_buffers cpu_debug_buffers;
+#endif
 };
 
 #define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
new file mode 100644
index 000000000000..62a9f4966b42
--- /dev/null
+++ b/arch/x86/include/asm/intel_ds.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_INTEL_DS_H
+#define _ASM_INTEL_DS_H
+
+#include <linux/percpu-defs.h>
+
+#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
+#define PEBS_BUFFER_SIZE	(PAGE_SIZE << 4)
+
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS		8
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+	u64	bts_buffer_base;
+	u64	bts_index;
+	u64	bts_absolute_maximum;
+	u64	bts_interrupt_threshold;
+	u64	pebs_buffer_base;
+	u64	pebs_index;
+	u64	pebs_absolute_maximum;
+	u64	pebs_interrupt_threshold;
+	u64	pebs_event_reset[MAX_PEBS_EVENTS];
+} __aligned(PAGE_SIZE);
+
+DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
+struct debug_store_buffers {
+	char	bts_buffer[BTS_BUFFER_SIZE];
+	char	pebs_buffer[PEBS_BUFFER_SIZE];
+};
+
+#endif
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 98e36e0c791c..21a4ed789ec0 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -7,11 +7,12 @@
 
 #include "../perf_event.h"
 
+/* Waste a full page so it can be mapped into the cpu_entry_area */
+DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE		24
 
-#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
-#define PEBS_BUFFER_SIZE	(PAGE_SIZE << 4)
 #define PEBS_FIXUP_SIZE		PAGE_SIZE
 
 /*
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index fe814fd5e014..b9283cc27622 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -38,6 +38,32 @@ cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
 		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
 }
 
+static void percpu_setup_debug_store(int cpu)
+{
+#ifdef CONFIG_CPU_SUP_INTEL
+	int npages;
+	void *cea;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return;
+
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
+	npages = sizeof(struct debug_store) / PAGE_SIZE;
+	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
+	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
+			     PAGE_KERNEL);
+
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
+	/*
+	 * Force the population of PMDs for not yet allocated per cpu
+	 * memory like debug store buffers.
+	 */
+	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
+	for (; npages; npages--, cea += PAGE_SIZE)
+		cea_set_pte(cea, 0, PAGE_NONE);
+#endif
+}
+
 /* Setup the fixmap mappings only once per-processor */
 static void __init setup_cpu_entry_area(int cpu)
 {
@@ -109,6 +135,7 @@ static void __init setup_cpu_entry_area(int cpu)
 	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
 		    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
 #endif
+	percpu_setup_debug_store(cpu);
 }
 
 static __init void setup_cpu_entry_area_ptes(void)
-- 
2.14.2
