59d5af67 | 1 | From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 |
e4cdf2a5 FG |
2 | From: Thomas Gleixner <tglx@linutronix.de> |
3 | Date: Thu, 4 Jan 2018 12:32:03 +0100 | |
59d5af67 | 4 | Subject: [PATCH] x86/kaslr: Fix the vaddr_end mess |
e4cdf2a5 FG |
5 | MIME-Version: 1.0 |
6 | Content-Type: text/plain; charset=UTF-8 | |
7 | Content-Transfer-Encoding: 8bit | |
8 | ||
9 | commit 1dddd25125112ba49706518ac9077a1026a18f37 upstream. | |
10 | ||
11 | vaddr_end for KASLR is only documented in the KASLR code itself and is | |
12 | adjusted depending on config options. So it's not surprising that a change | |
13 | of the memory layout causes KASLR to have the wrong vaddr_end. This can map | |
14 | arbitrary stuff into other areas causing hard to understand problems. | |
15 | ||
16 | Remove the whole ifdef magic and define the start of the cpu_entry_area to | |
17 | be the end of the KASLR vaddr range. | |
18 | ||
19 | Add documentation to that effect. | |
20 | ||
21 | Fixes: 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap") | |
22 | Reported-by: Benjamin Gilbert <benjamin.gilbert@coreos.com> | |
23 | Signed-off-by: Thomas Gleixner <tglx@linutronix.de> | |
24 | Tested-by: Benjamin Gilbert <benjamin.gilbert@coreos.com> | |
25 | Cc: Andy Lutomirski <luto@kernel.org> | |
26 | Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | |
27 | Cc: Dave Hansen <dave.hansen@linux.intel.com> | |
28 | Cc: Peter Zijlstra <peterz@infradead.org> | |
29 | Cc: Thomas Garnier <thgarnie@google.com>, | |
30 | Cc: Alexander Kuleshov <kuleshovmail@gmail.com> | |
31 | Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801041320360.1771@nanos | |
32 | Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | |
33 | Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com> | |
34 | --- | |
35 | Documentation/x86/x86_64/mm.txt | 6 ++++++ | |
36 | arch/x86/include/asm/pgtable_64_types.h | 8 +++++++- | |
37 | arch/x86/mm/kaslr.c | 32 +++++++++----------------------- | |
38 | 3 files changed, 22 insertions(+), 24 deletions(-) | |
39 | ||
40 | diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt | |
41 | index f7dabe1f01e9..ea91cb61a602 100644 | |
42 | --- a/Documentation/x86/x86_64/mm.txt | |
43 | +++ b/Documentation/x86/x86_64/mm.txt | |
44 | @@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) | |
45 | ... unused hole ... | |
46 | ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) | |
47 | ... unused hole ... | |
48 | + vaddr_end for KASLR | |
49 | fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping | |
50 | fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI | |
51 | ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks | |
52 | @@ -37,6 +38,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) | |
53 | ... unused hole ... | |
54 | ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) | |
55 | ... unused hole ... | |
56 | + vaddr_end for KASLR | |
57 | fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping | |
58 | ... unused hole ... | |
59 | ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks | |
60 | @@ -71,3 +73,7 @@ during EFI runtime calls. | |
61 | Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all | |
62 | physical memory, vmalloc/ioremap space and virtual memory map are randomized. | |
63 | Their order is preserved but their base will be offset early at boot time. | |
64 | + | |
65 | +Be very careful vs. KASLR when changing anything here. The KASLR address | |
66 | +range must not overlap with anything except the KASAN shadow area, which is | |
67 | +correct as KASAN disables KASLR. | |
68 | diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h | |
69 | index 0dd48d17a4a1..928d558e7778 100644 | |
70 | --- a/arch/x86/include/asm/pgtable_64_types.h | |
71 | +++ b/arch/x86/include/asm/pgtable_64_types.h | |
72 | @@ -74,7 +74,13 @@ typedef struct { pteval_t pte; } pte_t; | |
73 | #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) | |
74 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) | |
75 | ||
76 | -/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ | |
77 | +/* | |
78 | + * See Documentation/x86/x86_64/mm.txt for a description of the memory map. | |
79 | + * | |
80 | + * Be very careful vs. KASLR when changing anything here. The KASLR address | |
81 | + * range must not overlap with anything except the KASAN shadow area, which | |
82 | + * is correct as KASAN disables KASLR. | |
83 | + */ | |
84 | #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) | |
85 | ||
86 | #ifdef CONFIG_X86_5LEVEL | |
87 | diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c | |
88 | index af599167fe3c..debc7cc8e152 100644 | |
89 | --- a/arch/x86/mm/kaslr.c | |
90 | +++ b/arch/x86/mm/kaslr.c | |
91 | @@ -33,25 +33,14 @@ | |
92 | #define TB_SHIFT 40 | |
93 | ||
94 | /* | |
95 | - * Virtual address start and end range for randomization. The end changes base | |
96 | - * on configuration to have the highest amount of space for randomization. | |
97 | - * It increases the possible random position for each randomized region. | |
98 | + * Virtual address start and end range for randomization. | |
99 | * | |
100 | - * You need to add an if/def entry if you introduce a new memory region | |
101 | - * compatible with KASLR. Your entry must be in logical order with memory | |
102 | - * layout. For example, ESPFIX is before EFI because its virtual address is | |
103 | - * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to | |
104 | - * ensure that this order is correct and won't be changed. | |
105 | + * The end address could depend on more configuration options to make the | |
106 | + * highest amount of space for randomization available, but that's too hard | |
107 | + * to keep straight and caused issues already. | |
108 | */ | |
109 | static const unsigned long vaddr_start = __PAGE_OFFSET_BASE; | |
110 | - | |
111 | -#if defined(CONFIG_X86_ESPFIX64) | |
112 | -static const unsigned long vaddr_end = ESPFIX_BASE_ADDR; | |
113 | -#elif defined(CONFIG_EFI) | |
114 | -static const unsigned long vaddr_end = EFI_VA_END; | |
115 | -#else | |
116 | -static const unsigned long vaddr_end = __START_KERNEL_map; | |
117 | -#endif | |
118 | +static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE; | |
119 | ||
120 | /* Default values */ | |
121 | unsigned long page_offset_base = __PAGE_OFFSET_BASE; | |
122 | @@ -100,15 +89,12 @@ void __init kernel_randomize_memory(void) | |
123 | unsigned long remain_entropy; | |
124 | ||
125 | /* | |
126 | - * All these BUILD_BUG_ON checks ensures the memory layout is | |
127 | - * consistent with the vaddr_start/vaddr_end variables. | |
128 | + * These BUILD_BUG_ON checks ensure the memory layout is consistent | |
129 | + * with the vaddr_start/vaddr_end variables. These checks are very | |
130 | + * limited.... | |
131 | */ | |
132 | BUILD_BUG_ON(vaddr_start >= vaddr_end); | |
133 | - BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && | |
134 | - vaddr_end >= EFI_VA_END); | |
135 | - BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || | |
136 | - IS_ENABLED(CONFIG_EFI)) && | |
137 | - vaddr_end >= __START_KERNEL_map); | |
138 | + BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE); | |
139 | BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); | |
140 | ||
141 | if (!kaslr_memory_enabled()) | |
142 | -- | |
143 | 2.14.2 | |
144 |