From 95ee3aee92e32b90ff10f47cb6cfc414e1fd92b2 Mon Sep 17 00:00:00 2001
From: Andrey Ryabinin <aryabinin@virtuozzo.com>
Date: Wed, 15 Nov 2017 17:36:35 -0800
Subject: [PATCH 127/242] x86/mm/kasan: Don't use vmemmap_populate() to
 initialize shadow
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

[ Note, this is a Git cherry-pick of the following commit:

    d17a1d97dc20: ("x86/mm/kasan: don't use vmemmap_populate() to initialize shadow")

  ... for easier x86 PTI code testing and back-porting. ]

The KASAN shadow is currently mapped using vmemmap_populate() since that
provides a semi-convenient way to map pages into init_top_pgt. However,
since that no longer zeroes the mapped pages, it is not suitable for
KASAN, which requires zeroed shadow memory.

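[ Editorial note, not part of the upstream commit message: KASAN maps
  each 8-byte granule of kernel memory to one shadow byte, and a shadow
  byte of 0 means "all 8 bytes are accessible". Shadow pages that come
  up holding stale, nonzero data therefore read as poison and produce
  false positives, which is why the shadow must start out zeroed. The
  address translation is the usual KASAN helper, sketched here assuming
  KASAN_SHADOW_SCALE_SHIFT == 3 as on x86-64:

      static inline void *kasan_mem_to_shadow(const void *addr)
      {
              return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                      + KASAN_SHADOW_OFFSET;
      }
  ]
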
Add kasan_populate_shadow() interface and use it instead of
vmemmap_populate(). Besides, this allows us to take advantage of
gigantic pages and use them to populate the shadow, which should save us
some memory wasted on page tables and reduce TLB pressure.

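[ Editorial note, not part of the upstream commit message: the savings
  are easy to estimate. Covering 1 GiB of shadow with 4 KiB pages takes
  262144 PTEs held in 512 page tables, i.e. 2 MiB spent purely on page
  tables, plus up to 262144 distinct TLB entries; a single gigantic
  1 GiB PUD mapping covers the same range with one entry and one TLB
  slot, assuming the CPU has X86_FEATURE_GBPAGES. ]
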
Link: http://lkml.kernel.org/r/20171103185147.2688-2-pasha.tatashin@oracle.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Bob Picco <bob.picco@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 2aeb07365bcd489620f71390a7d2031cd4dfb83e)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit f60ab0015a57d9fbf659b212d504682f069b0590)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/mm/kasan_init_64.c | 143 +++++++++++++++++++++++++++++++++++++++++---
 arch/x86/Kconfig            |   2 +-
 2 files changed, 137 insertions(+), 8 deletions(-)

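[ Editorial note, not part of the upstream commit; placed after the
  "---" marker so that git am ignores it: the new populate helpers
  below share one shape at every page-table level. A simplified sketch
  of the PMD-level step, with the boot_cpu_has(X86_FEATURE_PSE) gate
  omitted for brevity:

      /* Try a single 2 MiB mapping first; fall back to 4 KiB pages. */
      if (pmd_none(*pmd) &&
          (end - addr) == PMD_SIZE && IS_ALIGNED(addr, PMD_SIZE)) {
              void *p = early_alloc(PMD_SIZE, nid);  /* zeroed by memblock */
              if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
                      return;                        /* one huge entry, done */
              if (p)                                 /* huge mapping failed */
                      memblock_free(__pa(p), PMD_SIZE);
      }
      /* Otherwise install a page table and fill it one zeroed PTE at a time. */
  ]
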
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 464089f33e80..3d7341986e13 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -3,12 +3,14 @@
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/vmalloc.h>
 
 #include <asm/e820/types.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 
@@ -17,7 +19,134 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static int __init map_range(struct range *range)
+static __init void *early_alloc(size_t size, int nid)
+{
+        return memblock_virt_alloc_try_nid_nopanic(size, size,
+                __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+                                      unsigned long end, int nid)
+{
+        pte_t *pte;
+
+        if (pmd_none(*pmd)) {
+                void *p;
+
+                if (boot_cpu_has(X86_FEATURE_PSE) &&
+                    ((end - addr) == PMD_SIZE) &&
+                    IS_ALIGNED(addr, PMD_SIZE)) {
+                        p = early_alloc(PMD_SIZE, nid);
+                        if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
+                                return;
+                        else if (p)
+                                memblock_free(__pa(p), PMD_SIZE);
+                }
+
+                p = early_alloc(PAGE_SIZE, nid);
+                pmd_populate_kernel(&init_mm, pmd, p);
+        }
+
+        pte = pte_offset_kernel(pmd, addr);
+        do {
+                pte_t entry;
+                void *p;
+
+                if (!pte_none(*pte))
+                        continue;
+
+                p = early_alloc(PAGE_SIZE, nid);
+                entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+                set_pte_at(&init_mm, addr, pte, entry);
+        } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
+                                      unsigned long end, int nid)
+{
+        pmd_t *pmd;
+        unsigned long next;
+
+        if (pud_none(*pud)) {
+                void *p;
+
+                if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
+                    ((end - addr) == PUD_SIZE) &&
+                    IS_ALIGNED(addr, PUD_SIZE)) {
+                        p = early_alloc(PUD_SIZE, nid);
+                        if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
+                                return;
+                        else if (p)
+                                memblock_free(__pa(p), PUD_SIZE);
+                }
+
+                p = early_alloc(PAGE_SIZE, nid);
+                pud_populate(&init_mm, pud, p);
+        }
+
+        pmd = pmd_offset(pud, addr);
+        do {
+                next = pmd_addr_end(addr, end);
+                if (!pmd_large(*pmd))
+                        kasan_populate_pmd(pmd, addr, next, nid);
+        } while (pmd++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
+                                      unsigned long end, int nid)
+{
+        pud_t *pud;
+        unsigned long next;
+
+        if (p4d_none(*p4d)) {
+                void *p = early_alloc(PAGE_SIZE, nid);
+
+                p4d_populate(&init_mm, p4d, p);
+        }
+
+        pud = pud_offset(p4d, addr);
+        do {
+                next = pud_addr_end(addr, end);
+                if (!pud_large(*pud))
+                        kasan_populate_pud(pud, addr, next, nid);
+        } while (pud++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
+                                      unsigned long end, int nid)
+{
+        void *p;
+        p4d_t *p4d;
+        unsigned long next;
+
+        if (pgd_none(*pgd)) {
+                p = early_alloc(PAGE_SIZE, nid);
+                pgd_populate(&init_mm, pgd, p);
+        }
+
+        p4d = p4d_offset(pgd, addr);
+        do {
+                next = p4d_addr_end(addr, end);
+                kasan_populate_p4d(p4d, addr, next, nid);
+        } while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
+                                         int nid)
+{
+        pgd_t *pgd;
+        unsigned long next;
+
+        addr = addr & PAGE_MASK;
+        end = round_up(end, PAGE_SIZE);
+        pgd = pgd_offset_k(addr);
+        do {
+                next = pgd_addr_end(addr, end);
+                kasan_populate_pgd(pgd, addr, next, nid);
+        } while (pgd++, addr = next, addr != end);
+}
+
+static void __init map_range(struct range *range)
 {
         unsigned long start;
         unsigned long end;
@@ -25,7 +154,7 @@ static int __init map_range(struct range *range)
         start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
         end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
 
-        return vmemmap_populate(start, end, NUMA_NO_NODE);
+        kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
 }
 
 static void __init clear_pgds(unsigned long start,
@@ -188,16 +317,16 @@ void __init kasan_init(void)
                 if (pfn_mapped[i].end == 0)
                         break;
 
-                if (map_range(&pfn_mapped[i]))
-                        panic("kasan: unable to allocate shadow!");
+                map_range(&pfn_mapped[i]);
         }
+
         kasan_populate_zero_shadow(
                 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                 kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
-        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
-                         (unsigned long)kasan_mem_to_shadow(_end),
-                         NUMA_NO_NODE);
+        kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
+                              (unsigned long)kasan_mem_to_shadow(_end),
+                              early_pfn_to_nid(__pa(_stext)));
 
         kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                         (void *)KASAN_SHADOW_END);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 67d07802ae95..8b5499bb24bb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -106,7 +106,7 @@ config X86
         select HAVE_ARCH_AUDITSYSCALL
         select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
         select HAVE_ARCH_JUMP_LABEL
-        select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
+        select HAVE_ARCH_KASAN if X86_64
         select HAVE_ARCH_KGDB
         select HAVE_ARCH_KMEMCHECK
         select HAVE_ARCH_MMAP_RND_BITS if MMU
-- 
2.14.2