/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif
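/*
 * (Editorial note: __GFP_NOTRACK has been removed from current mainline,
 *  so on such kernels the definition above just makes the allocation
 *  masks below OR in 0, which is harmless.)
 */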

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif
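/*
 * On 64 bit the kernel image is mapped with 2MB pages, so cloning it at
 * PMD granularity is sufficient.  On 32 bit the image sections are not
 * huge-page aligned, and cloning whole PMDs there could pull adjacent,
 * unrelated kernel data into the user page-tables, so clone at PTE
 * granularity instead.
 */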

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}
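/*
 * Summary of the boot options handled above:
 *
 *	pti=off / nopti	- force PTI off
 *	pti=on		- force PTI on, even on CPUs without
 *			  X86_BUG_CPU_MELTDOWN
 *	pti=auto	- enable PTI only on affected CPUs (the default)
 */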

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
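	/*
	 * (With PTI a PGD is an 8k, order-1 allocation: the kernel half
	 *  plus, one 4k page above it, the user half that
	 *  kernel_to_user_pgdp() reaches by flipping the corresponding
	 *  address bit.)
	 */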
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

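	/*
	 * On kernels built with 4-level paging the p4d level is folded,
	 * so p4d_offset() simply hands back the PGD entry itself.
	 */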
	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
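	/*
	 * The vsyscall page is the one piece of the kernel mapping that
	 * userspace must be able to touch directly, so _PAGE_USER has to
	 * be set on every pagetable level leading to it in the user copy:
	 */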
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};
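/*
 * Cloning at PMD level copies whole PMD entries, so for non-huge
 * mappings the kernel and user tables end up sharing the PTE pages
 * underneath; cloning at PTE level copies individual 4k PTEs instead.
 */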

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			/* Round up so an unaligned start cannot skip mappings. */
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			/* Round up so an unaligned start cannot skip mappings. */
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs.  This ensures only setting
			 * _PAGE_GLOBAL on present PMDs.  This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables.  It is effectively
			 * global, so set it as global in both copies.  Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported.  The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD.  That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr += PAGE_SIZE;

		} else {
			BUG();
		}
	}
}
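/*
 * Example: pti_clone_entry_text() below clones the entry-code range
 *
 *	pti_clone_pgtable((unsigned long)__entry_text_start,
 *			  (unsigned long)__irqentry_text_end,
 *			  PTI_CLONE_PMD);
 *
 * at PMD granularity, so both page-table copies share the PTE pages
 * that map the entry code.
 */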

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA into the user space visible page table.
 */
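/*
 * The cpu_entry_area holds the per-CPU entry stacks, TSS and entry
 * trampoline; without it mapped in the user page-tables the CPU could
 * not even begin the switch into the kernel.
 */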
static void __init pti_clone_user_shared(void)
{
	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry and irqentry text and force them RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __irqentry_text_end,
			  PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text, so it is not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on is specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

/*
 * This is the only user for these and it is not arch-generic
 * like the other set_memory.h functions.  Just extern them.
 */
extern int set_memory_nonglobal(unsigned long addr, int numpages);
extern int set_memory_global(unsigned long addr, int numpages);

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);

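	/*
	 * Two distinct end points: everything up to the end of rodata is
	 * cloned into the user page-tables, but only the range up to the
	 * end of the exception tables is marked global again afterwards.
	 */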
	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table.  Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them.  This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

	debug_checkwx_user();
}