/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif
static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

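/*
 * Boot-time control, as parsed below: "pti=off", "pti=on" and "pti=auto"
 * select the mode explicitly and take precedence over the older "nopti"
 * switch. In PTI_AUTO mode, PTI is enabled only on CPUs marked with
 * X86_BUG_CPU_MELTDOWN. On Xen PV, PTI is force-disabled before any of
 * the command-line options are even considered.
 */
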
void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}

pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *    EFI runtime mapping or something similar, and the kernel
	 *    may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

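/*
 * Layout note: with PTI, each PGD is allocated as a pair of adjacent 4k
 * pages, kernel half first; kernel_to_user_pgdp() flips a single address
 * bit to reach the user half one page up, which is how the code above
 * finds the user copy without any extra bookkeeping.
 */
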
/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (!new_p4d_page)
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

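/*
 * Note: with 4-level paging, the p4d level is folded into the pgd, so
 * the set_pgd() above is what actually populates the top level and
 * p4d_offset() is effectively a pass-through.
 */
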
/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	/* The walk below may fail to allocate; do not dereference NULL. */
	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (!new_pud_page)
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (!new_pmd_page)
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down. Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables. It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	/* The PMD walk may fail to allocate; do not dereference NULL. */
	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

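/*
 * set_vsyscall_pgtable_user_bits() (arch/x86/entry/vsyscall/vsyscall_64.c)
 * then sets _PAGE_USER on each page-table level leading to the vsyscall
 * page, which is the one kernel-provided mapping userspace can still
 * reach with PTI enabled.
 */
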
static void
pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;
		pud = pud_offset(p4d, addr);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			continue;

		target_pmd = pti_user_pagetable_walk_pmd(addr);
		if (WARN_ON(!target_pmd))
			return;

		/*
		 * Only clone present PMDs. This ensures only setting
		 * _PAGE_GLOBAL on present PMDs. This should only be
		 * called on well-known addresses anyway, so a non-
		 * present PMD would be a surprise.
		 */
		if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
			return;

		/*
		 * Setting 'target_pmd' below creates a mapping in both
		 * the user and kernel page tables. It is effectively
		 * global, so set it as global in both copies. Note:
		 * the X86_FEATURE_PGE check is not _required_ because
		 * the CPU ignores _PAGE_GLOBAL when PGE is not
		 * supported. The check keeps consistency with other
		 * code that only sets this bit when it is supported.
		 */
		if (boot_cpu_has(X86_FEATURE_PGE))
			*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

		/*
		 * Copy the PMD. That is, the kernelmode and usermode
		 * tables will share the last-level page tables of this
		 * address range.
		 */
		*target_pmd = pmd_clear_flags(*pmd, clear);
	}
}

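/*
 * Both callers below (pti_clone_entry_text() and pti_clone_kernel_text())
 * pass _PAGE_RW as 'clear', so every PMD cloned into the user tables ends
 * up read-only there.
 */
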
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA into the user space visible page table.
 */
static void __init pti_clone_user_shared(void)
{
	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}

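/*
 * The cpu_entry_area cloned above holds everything the entry code needs
 * before it can switch CR3: the per-cpu entry stacks, GDT, TSS and the
 * entry trampoline.
 */
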
/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry and irqentry text and force it RO.
 */
static void __init pti_clone_entry_text(void)
{
	pti_clone_pmds((unsigned long) __entry_text_start,
			(unsigned long) __irqentry_text_end,
			_PAGE_RW);
}

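/*
 * __entry_text_start and __irqentry_text_end are linker symbols (see
 * asm/sections.h) bracketing the .entry.text and .irqentry.text sections,
 * i.e. the code that runs closest to the user/kernel transition, cloned
 * here at PMD granularity.
 */
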
/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto. Do the most
	 * secure thing (not global) if pti=on was specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages. Do the safe thing (disable
	 * global kernel image). This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures. Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

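/*
 * In short: the kernel image stays global only on pti=auto systems that
 * lack PCID, are not K8 and were not built with RANDSTRUCT.
 */
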
/*
 * For some configurations, map all of kernel text into the user page
 * tables. This reduces TLB misses, especially on non-PCID systems.
 */
void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web. But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = (unsigned long)__end_rodata_hpage_align;

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pmds(start, end, _PAGE_RW);
}

/*
 * This is the only user of set_memory_nonglobal() and it is not
 * arch-generic like the other set_memory.h functions. Just extern it.
 */
extern int set_memory_nonglobal(unsigned long addr, int numpages);
void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel. We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
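	/*
	 * For instance, a hypothetical _end of 0xffffffff82d3a000 rounds
	 * up to 0xffffffff82e00000, the next 2MB (PMD_PAGE_SIZE) boundary.
	 */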
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	if (pti_kernel_image_global_ok())
		return;

	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}