From ed422950e50aeb9a05920e7387b4dd7c8dc2fc67 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Date: Fri, 29 Sep 2017 17:08:20 +0300
Subject: [PATCH 077/241] x86/xen: Drop 5-level paging support code from the
 XEN_PV code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

It was decided 5-level paging is not going to be supported in XEN_PV.

Let's drop the dead code from the XEN_PV code.

Tested-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@suse.de>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170929140821.37654-6-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 773dd2fca581b0a80e5a33332cc8ee67e5a79cba)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 3fd0b7ef0094fd8bb3c8172d9b137ebe0d81ecbc)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/xen/mmu_pv.c | 159 +++++++++++++++++++-------------------------------
 1 file changed, 60 insertions(+), 99 deletions(-)

diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index ba76f3ce997f..45bb2d462e44 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -469,7 +469,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
-#if CONFIG_PGTABLE_LEVELS == 4
+#ifdef CONFIG_X86_64
 __visible pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
@@ -558,7 +558,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_X86_64 */
 
 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
@@ -600,21 +600,17 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
 		bool last, unsigned long limit)
 {
-	int i, nr, flush = 0;
+	int flush = 0;
+	pud_t *pud;
 
-	nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
-	for (i = 0; i < nr; i++) {
-		pud_t *pud;
 
-		if (p4d_none(p4d[i]))
-			continue;
+	if (p4d_none(*p4d))
+		return flush;
 
-		pud = pud_offset(&p4d[i], 0);
-		if (PTRS_PER_PUD > 1)
-			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
-		flush |= xen_pud_walk(mm, pud, func,
-				last && i == nr - 1, limit);
-	}
+	pud = pud_offset(p4d, 0);
+	if (PTRS_PER_PUD > 1)
+		flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
+	flush |= xen_pud_walk(mm, pud, func, last, limit);
 	return flush;
 }
 
@@ -664,8 +660,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
 			continue;
 
 		p4d = p4d_offset(&pgd[i], 0);
-		if (PTRS_PER_P4D > 1)
-			flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
 		flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
 	}
 
@@ -1196,22 +1190,14 @@ static void __init xen_cleanmfnmap(unsigned long vaddr)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
-	unsigned int i;
 	bool unpin;
 
 	unpin = (vaddr == 2 * PGDIR_SIZE);
 	vaddr &= PMD_MASK;
 	pgd = pgd_offset_k(vaddr);
 	p4d = p4d_offset(pgd, 0);
-	for (i = 0; i < PTRS_PER_P4D; i++) {
-		if (p4d_none(p4d[i]))
-			continue;
-		xen_cleanmfnmap_p4d(p4d + i, unpin);
-	}
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		set_pgd(pgd, __pgd(0));
-		xen_cleanmfnmap_free_pgtbl(p4d, unpin);
-	}
+	if (!p4d_none(*p4d))
+		xen_cleanmfnmap_p4d(p4d, unpin);
 }
 
 static void __init xen_pagetable_p2m_free(void)
@@ -1717,7 +1703,7 @@ static void xen_release_pmd(unsigned long pfn)
 	xen_release_ptpage(pfn, PT_PMD);
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
 	xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2054,13 +2040,12 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
  */
 void __init xen_relocate_p2m(void)
 {
-	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
+	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
 	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
-	int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
+	int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
 	pte_t *pt;
 	pmd_t *pmd;
 	pud_t *pud;
-	p4d_t *p4d = NULL;
 	pgd_t *pgd;
 	unsigned long *new_p2m;
 	int save_pud;
@@ -2070,11 +2055,7 @@ void __init xen_relocate_p2m(void)
 	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
 	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
 	n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
-	if (PTRS_PER_P4D > 1)
-		n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
-	else
-		n_p4d = 0;
-	n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
+	n_frames = n_pte + n_pt + n_pmd + n_pud;
 
 	new_area = xen_find_free_area(PFN_PHYS(n_frames));
 	if (!new_area) {
@@ -2090,76 +2071,56 @@ void __init xen_relocate_p2m(void)
 	 * To avoid any possible virtual address collision, just use
 	 * 2 * PUD_SIZE for the new area.
 	 */
-	p4d_phys = new_area;
-	pud_phys = p4d_phys + PFN_PHYS(n_p4d);
+	pud_phys = new_area;
 	pmd_phys = pud_phys + PFN_PHYS(n_pud);
 	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
 	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
 
 	pgd = __va(read_cr3_pa());
 	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
-	idx_p4d = 0;
 	save_pud = n_pud;
-	do {
-		if (n_p4d > 0) {
-			p4d = early_memremap(p4d_phys, PAGE_SIZE);
-			clear_page(p4d);
-			n_pud = min(save_pud, PTRS_PER_P4D);
-		}
-		for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
-			pud = early_memremap(pud_phys, PAGE_SIZE);
-			clear_page(pud);
-			for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
-			     idx_pmd++) {
-				pmd = early_memremap(pmd_phys, PAGE_SIZE);
-				clear_page(pmd);
-				for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
-				     idx_pt++) {
-					pt = early_memremap(pt_phys, PAGE_SIZE);
-					clear_page(pt);
-					for (idx_pte = 0;
-					     idx_pte < min(n_pte, PTRS_PER_PTE);
-					     idx_pte++) {
-						set_pte(pt + idx_pte,
-							pfn_pte(p2m_pfn, PAGE_KERNEL));
-						p2m_pfn++;
-					}
-					n_pte -= PTRS_PER_PTE;
-					early_memunmap(pt, PAGE_SIZE);
-					make_lowmem_page_readonly(__va(pt_phys));
-					pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
-							  PFN_DOWN(pt_phys));
-					set_pmd(pmd + idx_pt,
-						__pmd(_PAGE_TABLE | pt_phys));
-					pt_phys += PAGE_SIZE;
+	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
+		pud = early_memremap(pud_phys, PAGE_SIZE);
+		clear_page(pud);
+		for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
+		     idx_pmd++) {
+			pmd = early_memremap(pmd_phys, PAGE_SIZE);
+			clear_page(pmd);
+			for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
+			     idx_pt++) {
+				pt = early_memremap(pt_phys, PAGE_SIZE);
+				clear_page(pt);
+				for (idx_pte = 0;
+				     idx_pte < min(n_pte, PTRS_PER_PTE);
+				     idx_pte++) {
+					set_pte(pt + idx_pte,
+						pfn_pte(p2m_pfn, PAGE_KERNEL));
+					p2m_pfn++;
 				}
-				n_pt -= PTRS_PER_PMD;
-				early_memunmap(pmd, PAGE_SIZE);
-				make_lowmem_page_readonly(__va(pmd_phys));
-				pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
-						  PFN_DOWN(pmd_phys));
-				set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
-				pmd_phys += PAGE_SIZE;
+				n_pte -= PTRS_PER_PTE;
+				early_memunmap(pt, PAGE_SIZE);
+				make_lowmem_page_readonly(__va(pt_phys));
+				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
+						  PFN_DOWN(pt_phys));
+				set_pmd(pmd + idx_pt,
+					__pmd(_PAGE_TABLE | pt_phys));
+				pt_phys += PAGE_SIZE;
 			}
-			n_pmd -= PTRS_PER_PUD;
-			early_memunmap(pud, PAGE_SIZE);
-			make_lowmem_page_readonly(__va(pud_phys));
-			pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
-			if (n_p4d > 0)
-				set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
-			else
-				set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
-			pud_phys += PAGE_SIZE;
-		}
-		if (n_p4d > 0) {
-			save_pud -= PTRS_PER_P4D;
-			early_memunmap(p4d, PAGE_SIZE);
-			make_lowmem_page_readonly(__va(p4d_phys));
-			pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
-			set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
-			p4d_phys += PAGE_SIZE;
+			n_pt -= PTRS_PER_PMD;
+			early_memunmap(pmd, PAGE_SIZE);
+			make_lowmem_page_readonly(__va(pmd_phys));
+			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
+					  PFN_DOWN(pmd_phys));
+			set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
+			pmd_phys += PAGE_SIZE;
 		}
-	} while (++idx_p4d < n_p4d);
+		n_pmd -= PTRS_PER_PUD;
+		early_memunmap(pud, PAGE_SIZE);
+		make_lowmem_page_readonly(__va(pud_phys));
+		pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
+		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
+		pud_phys += PAGE_SIZE;
+	}
 
 	/* Now copy the old p2m info to the new area. */
 	memcpy(new_p2m, xen_p2m_addr, size);
@@ -2386,7 +2347,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 	pv_mmu_ops.set_p4d = xen_set_p4d;
 #endif
 
@@ -2396,7 +2357,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
 	pv_mmu_ops.release_pte = xen_release_pte;
 	pv_mmu_ops.release_pmd = xen_release_pmd;
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
@@ -2460,14 +2421,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
 	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
 
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
 	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
 	.set_p4d = xen_set_p4d_hyper,
 
 	.alloc_pud = xen_alloc_pmd_init,
 	.release_pud = xen_release_pmd_init,
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_X86_64 */
 
 	.activate_mm = xen_activate_mm,
 	.dup_mmap = xen_dup_mmap,
--
2.14.2
