diff --git a/patches/kernel/0127-x86-mm-kasan-Don-t-use-vmemmap_populate-to-initializ.patch b/patches/kernel/0127-x86-mm-kasan-Don-t-use-vmemmap_populate-to-initializ.patch
new file mode 100644 (file)
index 0000000..9c4ea91
--- /dev/null
@@ -0,0 +1,266 @@
+From 95ee3aee92e32b90ff10f47cb6cfc414e1fd92b2 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Wed, 15 Nov 2017 17:36:35 -0800
+Subject: [PATCH 127/231] x86/mm/kasan: Don't use vmemmap_populate() to
+ initialize shadow
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+CVE-2017-5754
+
+[ Note, this is a Git cherry-pick of the following commit:
+
+    d17a1d97dc20: ("x86/mm/kasan: don't use vmemmap_populate() to initialize shadow")
+
+  ... for easier x86 PTI code testing and back-porting. ]
+
+The KASAN shadow is currently mapped using vmemmap_populate() since that
+provides a semi-convenient way to map pages into init_top_pgt.  However,
+since that no longer zeroes the mapped pages, it is not suitable for
+KASAN, which requires zeroed shadow memory.
+
+Add kasan_populate_shadow() interface and use it instead of
+vmemmap_populate().  Besides, this allows us to take advantage of
+gigantic pages and use them to populate the shadow, which should save us
+some memory wasted on page tables and reduce TLB pressure.
+
+Link: http://lkml.kernel.org/r/20171103185147.2688-2-pasha.tatashin@oracle.com
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Steven Sistare <steven.sistare@oracle.com>
+Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: Bob Picco <bob.picco@oracle.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+(cherry picked from commit 2aeb07365bcd489620f71390a7d2031cd4dfb83e)
+Signed-off-by: Andy Whitcroft <apw@canonical.com>
+Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
+(cherry picked from commit f60ab0015a57d9fbf659b212d504682f069b0590)
+Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
+---
+ arch/x86/mm/kasan_init_64.c | 143 +++++++++++++++++++++++++++++++++++++++++---
+ arch/x86/Kconfig            |   2 +-
+ 2 files changed, 137 insertions(+), 8 deletions(-)
+
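+[ Note on the approach, for review context only (not part of the upstream
+  commit message): each kasan_populate_*() helper below first tries to map
+  one naturally aligned huge page (2M at the PMD level, 1G at the PUD
+  level) and falls back to the next smaller granularity only when the
+  remaining range does not cover exactly one such page. A minimal
+  userspace sketch of that size check follows; the constant value and the
+  helper name are illustrative, not taken from the kernel tree:
+
+      #include <stdbool.h>
+      #include <stdint.h>
+      #include <stdio.h>
+
+      /* illustrative userspace sketch, not kernel code */
+      #define PMD_SIZE (2UL * 1024 * 1024)    /* one 2M huge mapping */
+
+      /* True iff [addr, end) is exactly one naturally aligned huge page,
+       * i.e. the case where pmd_set_huge()/pud_set_huge() is attempted. */
+      static bool fits_one_huge_page(uint64_t addr, uint64_t end,
+                                     uint64_t size)
+      {
+              return (end - addr) == size && (addr & (size - 1)) == 0;
+      }
+
+      int main(void)
+      {
+              /* aligned 2M range: huge mapping possible -> prints 1 */
+              printf("%d\n", fits_one_huge_page(0x200000, 0x400000, PMD_SIZE));
+              /* misaligned start: falls back to 4k PTEs -> prints 0 */
+              printf("%d\n", fits_one_huge_page(0x201000, 0x401000, PMD_SIZE));
+              return 0;
+      }
+]
+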
+diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
+index 464089f33e80..3d7341986e13 100644
+--- a/arch/x86/mm/kasan_init_64.c
++++ b/arch/x86/mm/kasan_init_64.c
+@@ -3,12 +3,14 @@
+ #include <linux/bootmem.h>
+ #include <linux/kasan.h>
+ #include <linux/kdebug.h>
++#include <linux/memblock.h>
+ #include <linux/mm.h>
+ #include <linux/sched.h>
+ #include <linux/sched/task.h>
+ #include <linux/vmalloc.h>
+
+ #include <asm/e820/types.h>
++#include <asm/pgalloc.h>
+ #include <asm/tlbflush.h>
+ #include <asm/sections.h>
+
+@@ -17,7 +19,134 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
+
+ static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+
+-static int __init map_range(struct range *range)
++static __init void *early_alloc(size_t size, int nid)
++{
++      return memblock_virt_alloc_try_nid_nopanic(size, size,
++              __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
++}
++
++static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
++                                    unsigned long end, int nid)
++{
++      pte_t *pte;
++
++      if (pmd_none(*pmd)) {
++              void *p;
++
++              if (boot_cpu_has(X86_FEATURE_PSE) &&
++                  ((end - addr) == PMD_SIZE) &&
++                  IS_ALIGNED(addr, PMD_SIZE)) {
++                      p = early_alloc(PMD_SIZE, nid);
++                      if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
++                              return;
++                      else if (p)
++                              memblock_free(__pa(p), PMD_SIZE);
++              }
++
++              p = early_alloc(PAGE_SIZE, nid);
++              pmd_populate_kernel(&init_mm, pmd, p);
++      }
++
++      pte = pte_offset_kernel(pmd, addr);
++      do {
++              pte_t entry;
++              void *p;
++
++              if (!pte_none(*pte))
++                      continue;
++
++              p = early_alloc(PAGE_SIZE, nid);
++              entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
++              set_pte_at(&init_mm, addr, pte, entry);
++      } while (pte++, addr += PAGE_SIZE, addr != end);
++}
++
++static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
++                                    unsigned long end, int nid)
++{
++      pmd_t *pmd;
++      unsigned long next;
++
++      if (pud_none(*pud)) {
++              void *p;
++
++              if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
++                  ((end - addr) == PUD_SIZE) &&
++                  IS_ALIGNED(addr, PUD_SIZE)) {
++                      p = early_alloc(PUD_SIZE, nid);
++                      if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
++                              return;
++                      else if (p)
++                              memblock_free(__pa(p), PUD_SIZE);
++              }
++
++              p = early_alloc(PAGE_SIZE, nid);
++              pud_populate(&init_mm, pud, p);
++      }
++
++      pmd = pmd_offset(pud, addr);
++      do {
++              next = pmd_addr_end(addr, end);
++              if (!pmd_large(*pmd))
++                      kasan_populate_pmd(pmd, addr, next, nid);
++      } while (pmd++, addr = next, addr != end);
++}
++
++static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
++                                    unsigned long end, int nid)
++{
++      pud_t *pud;
++      unsigned long next;
++
++      if (p4d_none(*p4d)) {
++              void *p = early_alloc(PAGE_SIZE, nid);
++
++              p4d_populate(&init_mm, p4d, p);
++      }
++
++      pud = pud_offset(p4d, addr);
++      do {
++              next = pud_addr_end(addr, end);
++              if (!pud_large(*pud))
++                      kasan_populate_pud(pud, addr, next, nid);
++      } while (pud++, addr = next, addr != end);
++}
++
++static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
++                                    unsigned long end, int nid)
++{
++      void *p;
++      p4d_t *p4d;
++      unsigned long next;
++
++      if (pgd_none(*pgd)) {
++              p = early_alloc(PAGE_SIZE, nid);
++              pgd_populate(&init_mm, pgd, p);
++      }
++
++      p4d = p4d_offset(pgd, addr);
++      do {
++              next = p4d_addr_end(addr, end);
++              kasan_populate_p4d(p4d, addr, next, nid);
++      } while (p4d++, addr = next, addr != end);
++}
++
++static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
++                                       int nid)
++{
++      pgd_t *pgd;
++      unsigned long next;
++
++      addr = addr & PAGE_MASK;
++      end = round_up(end, PAGE_SIZE);
++      pgd = pgd_offset_k(addr);
++      do {
++              next = pgd_addr_end(addr, end);
++              kasan_populate_pgd(pgd, addr, next, nid);
++      } while (pgd++, addr = next, addr != end);
++}
++
++static void __init map_range(struct range *range)
+ {
+       unsigned long start;
+       unsigned long end;
+@@ -25,7 +154,7 @@ static int __init map_range(struct range *range)
+       start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
+       end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
+
+-      return vmemmap_populate(start, end, NUMA_NO_NODE);
++      kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
+ }
+
+ static void __init clear_pgds(unsigned long start,
+@@ -188,16 +317,16 @@ void __init kasan_init(void)
+               if (pfn_mapped[i].end == 0)
+                      break;
+
+-              if (map_range(&pfn_mapped[i]))
+-                      panic("kasan: unable to allocate shadow!");
++              map_range(&pfn_mapped[i]);
+       }
++
+       kasan_populate_zero_shadow(
+               kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+              kasan_mem_to_shadow((void *)__START_KERNEL_map));
+
+-      vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
+-                      (unsigned long)kasan_mem_to_shadow(_end),
+-                      NUMA_NO_NODE);
++      kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
++                            (unsigned long)kasan_mem_to_shadow(_end),
++                            early_pfn_to_nid(__pa(_stext)));
+
+       kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+                       (void *)KASAN_SHADOW_END);
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 67d07802ae95..8b5499bb24bb 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -106,7 +106,7 @@ config X86
+       select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HUGE_VMAP              if X86_64 || X86_PAE
+       select HAVE_ARCH_JUMP_LABEL
+-      select HAVE_ARCH_KASAN                  if X86_64 && SPARSEMEM_VMEMMAP
++      select HAVE_ARCH_KASAN                  if X86_64
+       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_KMEMCHECK
+       select HAVE_ARCH_MMAP_RND_BITS          if MMU
+-- 
+2.14.2
+