Commit | Line | Data |
---|---|---|
59d5af67 | 1 | From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 |
321d628a FG |
2 | From: Thomas Gleixner <tglx@linutronix.de> |
3 | Date: Wed, 20 Dec 2017 18:51:31 +0100 | |
59d5af67 | 4 | Subject: [PATCH] x86/cpu_entry_area: Move it out of the fixmap |
321d628a FG |
5 | MIME-Version: 1.0 |
6 | Content-Type: text/plain; charset=UTF-8 | |
7 | Content-Transfer-Encoding: 8bit | |
8 | ||
9 | CVE-2017-5754 | |
10 | ||
11 | Put the cpu_entry_area into a separate P4D entry. The fixmap gets too big | |
12 | and 0-day already hit a case where the fixmap PTEs were cleared by | |
13 | cleanup_highmap(). | |
14 | ||
15 | Aside of that the fixmap API is a pain as it's all backwards. | |
16 | ||
17 | Signed-off-by: Thomas Gleixner <tglx@linutronix.de> | |
18 | Cc: Andy Lutomirski <luto@kernel.org> | |
19 | Cc: Borislav Petkov <bp@alien8.de> | |
20 | Cc: Dave Hansen <dave.hansen@linux.intel.com> | |
21 | Cc: H. Peter Anvin <hpa@zytor.com> | |
22 | Cc: Josh Poimboeuf <jpoimboe@redhat.com> | |
23 | Cc: Juergen Gross <jgross@suse.com> | |
24 | Cc: Linus Torvalds <torvalds@linux-foundation.org> | |
25 | Cc: Peter Zijlstra <peterz@infradead.org> | |
26 | Cc: linux-kernel@vger.kernel.org | |
27 | Signed-off-by: Ingo Molnar <mingo@kernel.org> | |
28 | (backported from commit 92a0f81d89571e3e8759366e050ee05cc545ef99) | |
29 | Signed-off-by: Andy Whitcroft <apw@canonical.com> | |
30 | Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com> | |
31 | (cherry picked from commit bda9eb328d9ce3757f22794f79da73dd5886c93a) | |
32 | Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com> | |
33 | --- | |
34 | Documentation/x86/x86_64/mm.txt | 2 + | |
35 | arch/x86/include/asm/cpu_entry_area.h | 18 ++++++++- | |
36 | arch/x86/include/asm/desc.h | 2 + | |
37 | arch/x86/include/asm/fixmap.h | 32 +--------------- | |
38 | arch/x86/include/asm/pgtable_32_types.h | 15 ++++++-- | |
39 | arch/x86/include/asm/pgtable_64_types.h | 47 +++++++++++++---------- | |
40 | arch/x86/kernel/dumpstack.c | 1 + | |
41 | arch/x86/kernel/traps.c | 5 ++- | |
42 | arch/x86/mm/cpu_entry_area.c | 66 +++++++++++++++++++++++++-------- | |
43 | arch/x86/mm/dump_pagetables.c | 6 ++- | |
44 | arch/x86/mm/init_32.c | 6 +++ | |
45 | arch/x86/mm/kasan_init_64.c | 30 ++++++++------- | |
46 | arch/x86/mm/pgtable_32.c | 1 + | |
47 | arch/x86/xen/mmu_pv.c | 2 - | |
48 | 14 files changed, 145 insertions(+), 88 deletions(-) | |
49 | ||
50 | diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt | |
51 | index 63a41671d25b..51101708a03a 100644 | |
52 | --- a/Documentation/x86/x86_64/mm.txt | |
53 | +++ b/Documentation/x86/x86_64/mm.txt | |
54 | @@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) | |
55 | ... unused hole ... | |
56 | ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) | |
57 | ... unused hole ... | |
58 | +fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping | |
59 | ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks | |
60 | ... unused hole ... | |
61 | ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space | |
62 | @@ -35,6 +36,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) | |
63 | ... unused hole ... | |
64 | ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) | |
65 | ... unused hole ... | |
66 | +fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping | |
67 | ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks | |
68 | ... unused hole ... | |
69 | ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space | |
70 | diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h | |
71 | index 5471826803af..2fbc69a0916e 100644 | |
72 | --- a/arch/x86/include/asm/cpu_entry_area.h | |
73 | +++ b/arch/x86/include/asm/cpu_entry_area.h | |
74 | @@ -43,10 +43,26 @@ struct cpu_entry_area { | |
75 | }; | |
76 | ||
77 | #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) | |
78 | -#define CPU_ENTRY_AREA_PAGES (CPU_ENTRY_AREA_SIZE / PAGE_SIZE) | |
79 | +#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) | |
80 | ||
81 | DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); | |
82 | ||
83 | extern void setup_cpu_entry_areas(void); | |
84 | +extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); | |
85 | + | |
86 | +#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE | |
87 | +#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) | |
88 | + | |
89 | +#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) | |
90 | + | |
91 | +#define CPU_ENTRY_AREA_MAP_SIZE \ | |
92 | + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) | |
93 | + | |
94 | +extern struct cpu_entry_area *get_cpu_entry_area(int cpu); | |
95 | + | |
96 | +static inline struct entry_stack *cpu_entry_stack(int cpu) | |
97 | +{ | |
98 | + return &get_cpu_entry_area(cpu)->entry_stack_page.stack; | |
99 | +} | |
100 | ||
101 | #endif | |
102 | diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h | |
103 | index b817fe247506..de40c514ba25 100644 | |
104 | --- a/arch/x86/include/asm/desc.h | |
105 | +++ b/arch/x86/include/asm/desc.h | |
106 | @@ -5,6 +5,8 @@ | |
107 | #include <asm/ldt.h> | |
108 | #include <asm/mmu.h> | |
109 | #include <asm/fixmap.h> | |
110 | +#include <asm/pgtable.h> | |
111 | +#include <asm/cpu_entry_area.h> | |
112 | ||
113 | #include <linux/smp.h> | |
114 | #include <linux/percpu.h> | |
115 | diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h | |
116 | index 1b2521473480..a6ff9e1a6189 100644 | |
117 | --- a/arch/x86/include/asm/fixmap.h | |
118 | +++ b/arch/x86/include/asm/fixmap.h | |
119 | @@ -25,7 +25,6 @@ | |
120 | #else | |
121 | #include <uapi/asm/vsyscall.h> | |
122 | #endif | |
123 | -#include <asm/cpu_entry_area.h> | |
124 | ||
125 | /* | |
126 | * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall | |
127 | @@ -84,7 +83,6 @@ enum fixed_addresses { | |
128 | FIX_IO_APIC_BASE_0, | |
129 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, | |
130 | #endif | |
131 | - FIX_RO_IDT, /* Virtual mapping for read-only IDT */ | |
132 | #ifdef CONFIG_X86_32 | |
133 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | |
134 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | |
135 | @@ -100,9 +98,6 @@ enum fixed_addresses { | |
136 | #ifdef CONFIG_X86_INTEL_MID | |
137 | FIX_LNW_VRTC, | |
138 | #endif | |
139 | - /* Fixmap entries to remap the GDTs, one per processor. */ | |
140 | - FIX_CPU_ENTRY_AREA_TOP, | |
141 | - FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1, | |
142 | ||
143 | #ifdef CONFIG_ACPI_APEI_GHES | |
144 | /* Used for GHES mapping from assorted contexts */ | |
145 | @@ -143,7 +138,7 @@ enum fixed_addresses { | |
146 | extern void reserve_top_address(unsigned long reserve); | |
147 | ||
148 | #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) | |
149 | -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | |
150 | +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | |
151 | ||
152 | extern int fixmaps_set; | |
153 | ||
154 | @@ -171,30 +166,5 @@ static inline void __set_fixmap(enum fixed_addresses idx, | |
155 | void __early_set_fixmap(enum fixed_addresses idx, | |
156 | phys_addr_t phys, pgprot_t flags); | |
157 | ||
158 | -static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page) | |
159 | -{ | |
160 | - BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); | |
161 | - | |
162 | - return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page; | |
163 | -} | |
164 | - | |
165 | -#define __get_cpu_entry_area_offset_index(cpu, offset) ({ \ | |
166 | - BUILD_BUG_ON(offset % PAGE_SIZE != 0); \ | |
167 | - __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE); \ | |
168 | - }) | |
169 | - | |
170 | -#define get_cpu_entry_area_index(cpu, field) \ | |
171 | - __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field)) | |
172 | - | |
173 | -static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) | |
174 | -{ | |
175 | - return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); | |
176 | -} | |
177 | - | |
178 | -static inline struct entry_stack *cpu_entry_stack(int cpu) | |
179 | -{ | |
180 | - return &get_cpu_entry_area(cpu)->entry_stack_page.stack; | |
181 | -} | |
182 | - | |
183 | #endif /* !__ASSEMBLY__ */ | |
184 | #endif /* _ASM_X86_FIXMAP_H */ | |
185 | diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h | |
186 | index 9fb2f2bc8245..67b60e11b70d 100644 | |
187 | --- a/arch/x86/include/asm/pgtable_32_types.h | |
188 | +++ b/arch/x86/include/asm/pgtable_32_types.h | |
189 | @@ -37,13 +37,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ | |
190 | #define LAST_PKMAP 1024 | |
191 | #endif | |
192 | ||
193 | -#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ | |
194 | - & PMD_MASK) | |
195 | +/* | |
196 | + * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c | |
197 | + * to avoid include recursion hell | |
198 | + */ | |
199 | +#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) | |
200 | + | |
201 | +#define CPU_ENTRY_AREA_BASE \ | |
202 | + ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK) | |
203 | + | |
204 | +#define PKMAP_BASE \ | |
205 | + ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) | |
206 | ||
207 | #ifdef CONFIG_HIGHMEM | |
208 | # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) | |
209 | #else | |
210 | -# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) | |
211 | +# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE) | |
212 | #endif | |
213 | ||
214 | #define MODULES_VADDR VMALLOC_START | |
215 | diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h | |
216 | index 06470da156ba..42e2750da525 100644 | |
217 | --- a/arch/x86/include/asm/pgtable_64_types.h | |
218 | +++ b/arch/x86/include/asm/pgtable_64_types.h | |
219 | @@ -75,32 +75,41 @@ typedef struct { pteval_t pte; } pte_t; | |
220 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) | |
221 | ||
222 | /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ | |
223 | -#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) | |
224 | +#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) | |
225 | + | |
226 | #ifdef CONFIG_X86_5LEVEL | |
227 | -#define VMALLOC_SIZE_TB _AC(16384, UL) | |
228 | -#define __VMALLOC_BASE _AC(0xff92000000000000, UL) | |
229 | -#define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) | |
230 | +# define VMALLOC_SIZE_TB _AC(16384, UL) | |
231 | +# define __VMALLOC_BASE _AC(0xff92000000000000, UL) | |
232 | +# define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) | |
233 | #else | |
234 | -#define VMALLOC_SIZE_TB _AC(32, UL) | |
235 | -#define __VMALLOC_BASE _AC(0xffffc90000000000, UL) | |
236 | -#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) | |
237 | +# define VMALLOC_SIZE_TB _AC(32, UL) | |
238 | +# define __VMALLOC_BASE _AC(0xffffc90000000000, UL) | |
239 | +# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) | |
240 | #endif | |
241 | + | |
242 | #ifdef CONFIG_RANDOMIZE_MEMORY | |
243 | -#define VMALLOC_START vmalloc_base | |
244 | -#define VMEMMAP_START vmemmap_base | |
245 | +# define VMALLOC_START vmalloc_base | |
246 | +# define VMEMMAP_START vmemmap_base | |
247 | #else | |
248 | -#define VMALLOC_START __VMALLOC_BASE | |
249 | -#define VMEMMAP_START __VMEMMAP_BASE | |
250 | +# define VMALLOC_START __VMALLOC_BASE | |
251 | +# define VMEMMAP_START __VMEMMAP_BASE | |
252 | #endif /* CONFIG_RANDOMIZE_MEMORY */ | |
253 | -#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) | |
254 | -#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) | |
255 | + | |
256 | +#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) | |
257 | + | |
258 | +#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) | |
259 | /* The module sections ends with the start of the fixmap */ | |
260 | -#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) | |
261 | -#define MODULES_LEN (MODULES_END - MODULES_VADDR) | |
262 | -#define ESPFIX_PGD_ENTRY _AC(-2, UL) | |
263 | -#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) | |
264 | -#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) | |
265 | -#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) | |
266 | +#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) | |
267 | +#define MODULES_LEN (MODULES_END - MODULES_VADDR) | |
268 | + | |
269 | +#define ESPFIX_PGD_ENTRY _AC(-2, UL) | |
270 | +#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) | |
271 | + | |
272 | +#define CPU_ENTRY_AREA_PGD _AC(-3, UL) | |
273 | +#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) | |
274 | + | |
275 | +#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) | |
276 | +#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) | |
277 | ||
278 | #define EARLY_DYNAMIC_PAGE_TABLES 64 | |
279 | ||
280 | diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c | |
281 | index 55bf1c3b5319..2bdeb983b9d8 100644 | |
282 | --- a/arch/x86/kernel/dumpstack.c | |
283 | +++ b/arch/x86/kernel/dumpstack.c | |
284 | @@ -18,6 +18,7 @@ | |
285 | #include <linux/nmi.h> | |
286 | #include <linux/sysfs.h> | |
287 | ||
288 | +#include <asm/cpu_entry_area.h> | |
289 | #include <asm/stacktrace.h> | |
290 | #include <asm/unwind.h> | |
291 | ||
292 | diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c | |
293 | index ef2d1b8a0516..5808ccb59266 100644 | |
294 | --- a/arch/x86/kernel/traps.c | |
295 | +++ b/arch/x86/kernel/traps.c | |
296 | @@ -1041,8 +1041,9 @@ void __init trap_init(void) | |
297 | * "sidt" instruction will not leak the location of the kernel, and | |
298 | * to defend the IDT against arbitrary memory write vulnerabilities. | |
299 | * It will be reloaded in cpu_init() */ | |
300 | - __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); | |
301 | - idt_descr.address = fix_to_virt(FIX_RO_IDT); | |
302 | + cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), | |
303 | + PAGE_KERNEL_RO); | |
304 | + idt_descr.address = CPU_ENTRY_AREA_RO_IDT; | |
305 | ||
306 | /* | |
307 | * Should be a barrier for any external CPU state: | |
308 | diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c | |
309 | index 235ff9cfaaf4..21e8b595cbb1 100644 | |
310 | --- a/arch/x86/mm/cpu_entry_area.c | |
311 | +++ b/arch/x86/mm/cpu_entry_area.c | |
312 | @@ -15,11 +15,27 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks | |
313 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); | |
314 | #endif | |
315 | ||
316 | +struct cpu_entry_area *get_cpu_entry_area(int cpu) | |
317 | +{ | |
318 | + unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; | |
319 | + BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); | |
320 | + | |
321 | + return (struct cpu_entry_area *) va; | |
322 | +} | |
323 | +EXPORT_SYMBOL(get_cpu_entry_area); | |
324 | + | |
325 | +void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) | |
326 | +{ | |
327 | + unsigned long va = (unsigned long) cea_vaddr; | |
328 | + | |
329 | + set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); | |
330 | +} | |
331 | + | |
332 | static void __init | |
333 | -set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) | |
334 | +cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) | |
335 | { | |
336 | - for ( ; pages; pages--, idx--, ptr += PAGE_SIZE) | |
337 | - __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot); | |
338 | + for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE) | |
339 | + cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); | |
340 | } | |
341 | ||
342 | /* Setup the fixmap mappings only once per-processor */ | |
343 | @@ -47,10 +63,12 @@ static void __init setup_cpu_entry_area(int cpu) | |
344 | pgprot_t tss_prot = PAGE_KERNEL; | |
345 | #endif | |
346 | ||
347 | - __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); | |
348 | - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page), | |
349 | - per_cpu_ptr(&entry_stack_storage, cpu), 1, | |
350 | - PAGE_KERNEL); | |
351 | + cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu), | |
352 | + gdt_prot); | |
353 | + | |
354 | + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page, | |
355 | + per_cpu_ptr(&entry_stack_storage, cpu), 1, | |
356 | + PAGE_KERNEL); | |
357 | ||
358 | /* | |
359 | * The Intel SDM says (Volume 3, 7.2.1): | |
360 | @@ -72,10 +90,9 @@ static void __init setup_cpu_entry_area(int cpu) | |
361 | BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ | |
362 | offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); | |
363 | BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); | |
364 | - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), | |
365 | - &per_cpu(cpu_tss_rw, cpu), | |
366 | - sizeof(struct tss_struct) / PAGE_SIZE, | |
367 | - tss_prot); | |
368 | + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss, | |
369 | + &per_cpu(cpu_tss_rw, cpu), | |
370 | + sizeof(struct tss_struct) / PAGE_SIZE, tss_prot); | |
371 | ||
372 | #ifdef CONFIG_X86_32 | |
373 | per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); | |
374 | @@ -85,20 +102,37 @@ static void __init setup_cpu_entry_area(int cpu) | |
375 | BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); | |
376 | BUILD_BUG_ON(sizeof(exception_stacks) != | |
377 | sizeof(((struct cpu_entry_area *)0)->exception_stacks)); | |
378 | - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks), | |
379 | - &per_cpu(exception_stacks, cpu), | |
380 | - sizeof(exception_stacks) / PAGE_SIZE, | |
381 | - PAGE_KERNEL); | |
382 | + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks, | |
383 | + &per_cpu(exception_stacks, cpu), | |
384 | + sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL); | |
385 | ||
386 | - __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), | |
387 | + cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline, | |
388 | __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); | |
389 | #endif | |
390 | } | |
391 | ||
392 | +static __init void setup_cpu_entry_area_ptes(void) | |
393 | +{ | |
394 | +#ifdef CONFIG_X86_32 | |
395 | + unsigned long start, end; | |
396 | + | |
397 | + BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE); | |
398 | + BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); | |
399 | + | |
400 | + start = CPU_ENTRY_AREA_BASE; | |
401 | + end = start + CPU_ENTRY_AREA_MAP_SIZE; | |
402 | + | |
403 | + for (; start < end; start += PMD_SIZE) | |
404 | + populate_extra_pte(start); | |
405 | +#endif | |
406 | +} | |
407 | + | |
408 | void __init setup_cpu_entry_areas(void) | |
409 | { | |
410 | unsigned int cpu; | |
411 | ||
412 | + setup_cpu_entry_area_ptes(); | |
413 | + | |
414 | for_each_possible_cpu(cpu) | |
415 | setup_cpu_entry_area(cpu); | |
416 | } | |
417 | diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c | |
418 | index 318a7c30e87e..3b7720404a9f 100644 | |
419 | --- a/arch/x86/mm/dump_pagetables.c | |
420 | +++ b/arch/x86/mm/dump_pagetables.c | |
421 | @@ -58,6 +58,7 @@ enum address_markers_idx { | |
422 | KASAN_SHADOW_START_NR, | |
423 | KASAN_SHADOW_END_NR, | |
424 | #endif | |
425 | + CPU_ENTRY_AREA_NR, | |
426 | #ifdef CONFIG_X86_ESPFIX64 | |
427 | ESPFIX_START_NR, | |
428 | #endif | |
429 | @@ -81,6 +82,7 @@ static struct addr_marker address_markers[] = { | |
430 | [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, | |
431 | [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, | |
432 | #endif | |
433 | + [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, | |
434 | #ifdef CONFIG_X86_ESPFIX64 | |
435 | [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, | |
436 | #endif | |
437 | @@ -104,6 +106,7 @@ enum address_markers_idx { | |
438 | #ifdef CONFIG_HIGHMEM | |
439 | PKMAP_BASE_NR, | |
440 | #endif | |
441 | + CPU_ENTRY_AREA_NR, | |
442 | FIXADDR_START_NR, | |
443 | END_OF_SPACE_NR, | |
444 | }; | |
445 | @@ -116,6 +119,7 @@ static struct addr_marker address_markers[] = { | |
446 | #ifdef CONFIG_HIGHMEM | |
447 | [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, | |
448 | #endif | |
449 | + [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" }, | |
450 | [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, | |
451 | [END_OF_SPACE_NR] = { -1, NULL } | |
452 | }; | |
453 | @@ -522,8 +526,8 @@ static int __init pt_dump_init(void) | |
454 | address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; | |
455 | # endif | |
456 | address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; | |
457 | + address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE; | |
458 | #endif | |
459 | - | |
460 | return 0; | |
461 | } | |
462 | __initcall(pt_dump_init); | |
463 | diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c | |
464 | index 8a64a6f2848d..135c9a7898c7 100644 | |
465 | --- a/arch/x86/mm/init_32.c | |
466 | +++ b/arch/x86/mm/init_32.c | |
467 | @@ -50,6 +50,7 @@ | |
468 | #include <asm/setup.h> | |
469 | #include <asm/set_memory.h> | |
470 | #include <asm/page_types.h> | |
471 | +#include <asm/cpu_entry_area.h> | |
472 | #include <asm/init.h> | |
473 | ||
474 | #include "mm_internal.h" | |
475 | @@ -766,6 +767,7 @@ void __init mem_init(void) | |
476 | mem_init_print_info(NULL); | |
477 | printk(KERN_INFO "virtual kernel memory layout:\n" | |
478 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | |
479 | + " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n" | |
480 | #ifdef CONFIG_HIGHMEM | |
481 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | |
482 | #endif | |
483 | @@ -777,6 +779,10 @@ void __init mem_init(void) | |
484 | FIXADDR_START, FIXADDR_TOP, | |
485 | (FIXADDR_TOP - FIXADDR_START) >> 10, | |
486 | ||
487 | + CPU_ENTRY_AREA_BASE, | |
488 | + CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE, | |
489 | + CPU_ENTRY_AREA_MAP_SIZE >> 10, | |
490 | + | |
491 | #ifdef CONFIG_HIGHMEM | |
492 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, | |
493 | (LAST_PKMAP*PAGE_SIZE) >> 10, | |
494 | diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c | |
495 | index d8836e45bc07..4cd556a30ee1 100644 | |
496 | --- a/arch/x86/mm/kasan_init_64.c | |
497 | +++ b/arch/x86/mm/kasan_init_64.c | |
498 | @@ -13,6 +13,8 @@ | |
499 | #include <asm/pgalloc.h> | |
500 | #include <asm/tlbflush.h> | |
501 | #include <asm/sections.h> | |
502 | +#include <asm/pgtable.h> | |
503 | +#include <asm/cpu_entry_area.h> | |
504 | ||
505 | extern pgd_t early_top_pgt[PTRS_PER_PGD]; | |
506 | extern struct range pfn_mapped[E820_MAX_ENTRIES]; | |
507 | @@ -321,31 +323,33 @@ void __init kasan_init(void) | |
508 | map_range(&pfn_mapped[i]); | |
509 | } | |
510 | ||
511 | - kasan_populate_zero_shadow( | |
512 | - kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), | |
513 | - kasan_mem_to_shadow((void *)__START_KERNEL_map)); | |
514 | - | |
515 | - kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), | |
516 | - (unsigned long)kasan_mem_to_shadow(_end), | |
517 | - early_pfn_to_nid(__pa(_stext))); | |
518 | - | |
519 | - shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM); | |
520 | + shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; | |
521 | shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); | |
522 | shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, | |
523 | PAGE_SIZE); | |
524 | ||
525 | - shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE); | |
526 | + shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + | |
527 | + CPU_ENTRY_AREA_MAP_SIZE); | |
528 | shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); | |
529 | shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, | |
530 | PAGE_SIZE); | |
531 | ||
532 | - kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), | |
533 | - shadow_cpu_entry_begin); | |
534 | + kasan_populate_zero_shadow( | |
535 | + kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), | |
536 | + shadow_cpu_entry_begin); | |
537 | ||
538 | kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, | |
539 | (unsigned long)shadow_cpu_entry_end, 0); | |
540 | ||
541 | - kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END); | |
542 | + kasan_populate_zero_shadow(shadow_cpu_entry_end, | |
543 | + kasan_mem_to_shadow((void *)__START_KERNEL_map)); | |
544 | + | |
545 | + kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), | |
546 | + (unsigned long)kasan_mem_to_shadow(_end), | |
547 | + early_pfn_to_nid(__pa(_stext))); | |
548 | + | |
549 | + kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), | |
550 | + (void *)KASAN_SHADOW_END); | |
551 | ||
552 | load_cr3(init_top_pgt); | |
553 | __flush_tlb_all(); | |
554 | diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c | |
555 | index b9bd5b8b14fa..77909bae5943 100644 | |
556 | --- a/arch/x86/mm/pgtable_32.c | |
557 | +++ b/arch/x86/mm/pgtable_32.c | |
558 | @@ -9,6 +9,7 @@ | |
559 | #include <linux/pagemap.h> | |
560 | #include <linux/spinlock.h> | |
561 | ||
562 | +#include <asm/cpu_entry_area.h> | |
563 | #include <asm/pgtable.h> | |
564 | #include <asm/pgalloc.h> | |
565 | #include <asm/fixmap.h> | |
566 | diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c | |
567 | index 53e65f605bdd..cd4b91b8d614 100644 | |
568 | --- a/arch/x86/xen/mmu_pv.c | |
569 | +++ b/arch/x86/xen/mmu_pv.c | |
570 | @@ -2286,7 +2286,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |
571 | ||
572 | switch (idx) { | |
573 | case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: | |
574 | - case FIX_RO_IDT: | |
575 | #ifdef CONFIG_X86_32 | |
576 | case FIX_WP_TEST: | |
577 | # ifdef CONFIG_HIGHMEM | |
578 | @@ -2297,7 +2296,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |
579 | #endif | |
580 | case FIX_TEXT_POKE0: | |
581 | case FIX_TEXT_POKE1: | |
582 | - case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM: | |
583 | /* All local page mappings */ | |
584 | pte = pfn_pte(phys, prot); | |
585 | break; | |
586 | -- | |
587 | 2.14.2 | |
588 |