x86/mm: Fix guard hole handling
author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
           Fri, 30 Nov 2018 20:23:27 +0000 (23:23 +0300)
committer  Stefan Bader <stefan.bader@canonical.com>
           Mon, 1 Apr 2019 12:37:29 +0000 (14:37 +0200)
CVE-2017-5754

There is a guard hole at the beginning of the kernel address space, also
used by hypervisors. It occupies 16 PGD entries.
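
The arithmetic behind those 16 entries can be reproduced in a small userspace sketch (a minimal illustration assuming 4-level paging, i.e. PGDIR_SHIFT == 39 and a 64-bit unsigned long; not kernel code):

#include <stdio.h>

/*
 * Guard hole geometry, mirroring the macros this patch introduces.
 * With 4-level paging each PGD entry covers 512 GiB, so 16 entries
 * starting at entry -256 span ffff800000000000 - ffff87ffffffffff.
 */
#define PGDIR_SHIFT		39
#define GUARD_HOLE_PGD_ENTRY	-256UL
#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)

int main(void)
{
	printf("base: %016lx\n", GUARD_HOLE_BASE_ADDR);	/* ffff800000000000 */
	printf("end:  %016lx\n", GUARD_HOLE_END_ADDR);	/* ffff880000000000 */
	return 0;
}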

This reserved range is not defined explicitly; it is calculated relative
to other entities: the direct mapping and the user space ranges.

The calculation was broken by recent changes to the kernel memory layout:
the LDT remap range is now mapped before the direct mapping, which makes
the calculation invalid.

The breakage leads to a crash on Xen dom0 boot [1].
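
A rough userspace sketch of the drift (same 4-level assumptions as above; the __PAGE_OFFSET values before and after commit d52888aa2753 are used purely for illustration):

#include <stdio.h>

/* pgd_index() as in the kernel, for 4-level paging. */
#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512
#define pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	unsigned long old_offset = 0xffff880000000000UL;	/* direct mapping before the LDT move */
	unsigned long new_offset = 0xffff888000000000UL;	/* direct mapping after it */

	/* Old layout: 256, i.e. PGD entry -256, the real start of the hole. */
	printf("old start: %lu\n", pgd_index(old_offset) - 16);
	/* New layout: 257 -- one slot too high, so the first guard-hole entry
	 * is missed and the LDT remap slot is treated as hypervisor space. */
	printf("new start: %lu\n", pgd_index(new_offset) - 16);
	return 0;
}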

Define the reserved range explicitly. It's part of the kernel ABI
(hypervisors expect it to be stable) and must not depend on changes in the
rest of the kernel memory layout.

[1] https://lists.xenproject.org/archives/html/xen-devel/2018-11/msg03313.html

Fixes: d52888aa2753 ("x86/mm: Move LDT remap out of KASLR region on 5-level paging")
Reported-by: Hans van Kranenburg <hans.van.kranenburg@mendix.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Hans van Kranenburg <hans.van.kranenburg@mendix.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: bp@alien8.de
Cc: hpa@zytor.com
Cc: dave.hansen@linux.intel.com
Cc: luto@kernel.org
Cc: peterz@infradead.org
Cc: boris.ostrovsky@oracle.com
Cc: bhe@redhat.com
Cc: linux-mm@kvack.org
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/20181130202328.65359-2-kirill.shutemov@linux.intel.com
(backported from commit 16877a5570e0c5f4270d5b17f9bab427bcae9514)
[juergh: Adjusted context.]
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/x86/include/asm/pgtable_64_types.h
arch/x86/mm/dump_pagetables.c
arch/x86/xen/mmu_pv.c

diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index d667ce09c82e01437a9b0535762972153522a180..ebd6cc9525a1b33aab6574eef6759f206d017c43 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -84,6 +84,11 @@ typedef struct { pteval_t pte; } pte_t;
  */
 #define MAXMEM                 _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
 
+#define GUARD_HOLE_PGD_ENTRY   -256UL
+#define GUARD_HOLE_SIZE                (16UL << PGDIR_SHIFT)
+#define GUARD_HOLE_BASE_ADDR   (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
+#define GUARD_HOLE_END_ADDR    (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
+
 #ifdef CONFIG_X86_5LEVEL
 # define VMALLOC_SIZE_TB       _AC(12800, UL)
 # define __VMALLOC_BASE                _AC(0xffa0000000000000, UL)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 4247f36d5ffb459798a817f220503e6f74527ff2..cd6b7ec39bcbe8d3501e1a2bd375a84c3c626cf3 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -494,11 +494,11 @@ static inline bool is_hypervisor_range(int idx)
 {
 #ifdef CONFIG_X86_64
        /*
-        * ffff800000000000 - ffff87ffffffffff is reserved for
-        * the hypervisor.
+        * A hole in the beginning of kernel address space reserved
+        * for a hypervisor.
         */
-       return  (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
-               (idx <  pgd_index(__PAGE_OFFSET));
+       return  (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
+               (idx <  pgd_index(GUARD_HOLE_END_ADDR));
 #else
        return false;
 #endif
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 40c48ea1b889fc2a5bbd86aa1acae813f90d7151..0b99078dbd92f8d7926b6e6b118ef94252af2585 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -615,19 +615,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
                          unsigned long limit)
 {
        int i, nr, flush = 0;
-       unsigned hole_low, hole_high;
+       unsigned hole_low = 0, hole_high = 0;
 
        /* The limit is the last byte to be touched */
        limit--;
        BUG_ON(limit >= FIXADDR_TOP);
 
+#ifdef CONFIG_X86_64
        /*
         * 64-bit has a great big hole in the middle of the address
-        * space, which contains the Xen mappings.  On 32-bit these
-        * will end up making a zero-sized hole and so is a no-op.
+        * space, which contains the Xen mappings.
         */
-       hole_low = pgd_index(USER_LIMIT);
-       hole_high = pgd_index(PAGE_OFFSET);
+       hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
+       hole_high = pgd_index(GUARD_HOLE_END_ADDR);
+#endif
 
        nr = pgd_index(limit) + 1;
        for (i = 0; i < nr; i++) {
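
The remainder of the loop (cut off above) skips the PGD indices inside [hole_low, hole_high). A minimal userspace sketch of the bounds it now computes, under the same 4-level assumptions used earlier:

#include <stdio.h>

#define PGDIR_SHIFT		39
#define PTRS_PER_PGD		512
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define GUARD_HOLE_PGD_ENTRY	-256UL
#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)

int main(void)
{
	unsigned long hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);	/* 256 */
	unsigned long hole_high = pgd_index(GUARD_HOLE_END_ADDR);	/* 272 */
	unsigned long i;

	/* The walk visits every PGD slot up to its limit but steps over
	 * the same 16 guard-hole slots that is_hypervisor_range() reports. */
	for (i = 0; i < PTRS_PER_PGD; i++)
		if (i >= hole_low && i < hole_high)
			printf("skipping pgd[%lu]\n", i);
	return 0;
}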