From f6bb8e560b2229af5dcf3127fc92e732539b4823 Mon Sep 17 00:00:00 2001
From: Andrey Ryabinin <aryabinin@virtuozzo.com>
Date: Fri, 29 Sep 2017 17:08:18 +0300
Subject: [PATCH 075/233] x86/kasan: Use the same shadow offset for 4- and
 5-level paging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

We are going to support boot-time switching between 4- and 5-level
paging. For KASAN it means we cannot have different KASAN_SHADOW_OFFSET
for different paging modes: the constant is passed to gcc to generate
code and cannot be changed at runtime.

This patch changes KASAN code to use 0xdffffc0000000000 as shadow offset
for both 4- and 5-level paging.

For 5-level paging it means that shadow memory region is not aligned to
PGD boundary anymore and we have to handle unaligned parts of the region
properly.

In addition, we have to exclude paravirt code from KASAN instrumentation
as we now use set_pgd() before KASAN is fully ready.

[kirill.shutemov@linux.intel.com: cleanup, changelog message]
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@suse.de>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170929140821.37654-4-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 12a8cc7fcf54a8575f094be1e99032ec38aa045c)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 2ce428150e002623aa0ed2a1ab840fde5f860f32)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 Documentation/x86/x86_64/mm.txt | 2 +-
 arch/x86/kernel/Makefile | 3 +-
 arch/x86/mm/kasan_init_64.c | 101 +++++++++++++++++++++++++++++++---------
 arch/x86/Kconfig | 1 -
 4 files changed, 83 insertions(+), 24 deletions(-)

diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index b0798e281aa6..3448e675b462 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -34,7 +34,7 @@ ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space
 ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
 ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
-ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB)
+ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 5bf0d5a473b4..aa059806201d 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -24,7 +24,8 @@ endif
 KASAN_SANITIZE_head$(BITS).o := n
 KASAN_SANITIZE_dumpstack.o := n
 KASAN_SANITIZE_dumpstack_$(BITS).o := n
-KASAN_SANITIZE_stacktrace.o := n
+KASAN_SANITIZE_stacktrace.o := n
+KASAN_SANITIZE_paravirt.o := n

 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
 OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 02c9d7553409..464089f33e80 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -15,6 +15,8 @@
 extern pgd_t early_top_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_MAX_ENTRIES];

+static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+
 static int __init map_range(struct range *range)
 {
 unsigned long start;
@@ -30,8 +32,10 @@ static void __init clear_pgds(unsigned long start,
 unsigned long end)
 {
 pgd_t *pgd;
+ /* See comment in kasan_init() */
+ unsigned long pgd_end = end & PGDIR_MASK;

- for (; start < end; start += PGDIR_SIZE) {
+ for (; start < pgd_end; start += PGDIR_SIZE) {
 pgd = pgd_offset_k(start);
 /*
 * With folded p4d, pgd_clear() is nop, use p4d_clear()
@@ -42,29 +46,61 @@ static void __init clear_pgds(unsigned long start,
 else
 pgd_clear(pgd);
 }
+
+ pgd = pgd_offset_k(start);
+ for (; start < end; start += P4D_SIZE)
+ p4d_clear(p4d_offset(pgd, start));
+}
+
+static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
+{
+ unsigned long p4d;
+
+ if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+ return (p4d_t *)pgd;
+
+ p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+ p4d += __START_KERNEL_map - phys_base;
+ return (p4d_t *)p4d + p4d_index(addr);
+}
+
+static void __init kasan_early_p4d_populate(pgd_t *pgd,
+ unsigned long addr,
+ unsigned long end)
+{
+ pgd_t pgd_entry;
+ p4d_t *p4d, p4d_entry;
+ unsigned long next;
+
+ if (pgd_none(*pgd)) {
+ pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+ set_pgd(pgd, pgd_entry);
+ }
+
+ p4d = early_p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+
+ if (!p4d_none(*p4d))
+ continue;
+
+ p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+ set_p4d(p4d, p4d_entry);
+ } while (p4d++, addr = next, addr != end && p4d_none(*p4d));
 }

 static void __init kasan_map_early_shadow(pgd_t *pgd)
 {
- int i;
- unsigned long start = KASAN_SHADOW_START;
+ /* See comment in kasan_init() */
+ unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
 unsigned long end = KASAN_SHADOW_END;
+ unsigned long next;

- for (i = pgd_index(start); start < end; i++) {
- switch (CONFIG_PGTABLE_LEVELS) {
- case 4:
- pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
- _KERNPG_TABLE);
- break;
- case 5:
- pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
- _KERNPG_TABLE);
- break;
- default:
- BUILD_BUG();
- }
- start += PGDIR_SIZE;
- }
+ pgd += pgd_index(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ kasan_early_p4d_populate(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
 }

 #ifdef CONFIG_KASAN_INLINE
@@ -101,7 +137,7 @@ void __init kasan_early_init(void)
 for (i = 0; i < PTRS_PER_PUD; i++)
 kasan_zero_pud[i] = __pud(pud_val);

- for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
+ for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
 kasan_zero_p4d[i] = __p4d(p4d_val);

 kasan_map_early_shadow(early_top_pgt);
@@ -117,12 +153,35 @@ void __init kasan_init(void)
 #endif

 memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
+
+ /*
+ * We use the same shadow offset for 4- and 5-level paging to
+ * facilitate boot-time switching between paging modes.
+ * As result in 5-level paging mode KASAN_SHADOW_START and
+ * KASAN_SHADOW_END are not aligned to PGD boundary.
+ *
+ * KASAN_SHADOW_START doesn't share PGD with anything else.
+ * We claim whole PGD entry to make things easier.
+ *
+ * KASAN_SHADOW_END lands in the last PGD entry and it collides with
+ * bunch of things like kernel code, modules, EFI mapping, etc.
+ * We need to take extra steps to not overwrite them.
+ */
+ if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ void *ptr;
+
+ ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+ memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
+ set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
+ __pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
+ }
+
 load_cr3(early_top_pgt);
 __flush_tlb_all();

- clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+ clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

- kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
+ kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
 kasan_mem_to_shadow((void *)PAGE_OFFSET));

 for (i = 0; i < E820_MAX_ENTRIES; i++) {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bf9f03740c30..67d07802ae95 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -300,7 +300,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 config KASAN_SHADOW_OFFSET
 hex
 depends on KASAN
- default 0xdff8000000000000 if X86_5LEVEL
 default 0xdffffc0000000000

 config HAVE_INTEL_TXT
--
2.14.2