arm64: add basic VMAP_STACK support
Author:     Mark Rutland <mark.rutland@arm.com>
AuthorDate: Fri, 21 Jul 2017 13:25:33 +0000 (14:25 +0100)
Commit:     Khalid Elmously <khalid.elmously@canonical.com>
CommitDate: Tue, 27 Feb 2018 16:32:12 +0000 (11:32 -0500)
This patch enables arm64 to be built with vmap'd task and IRQ stacks.

As vmap'd stacks are mapped at page granularity, stacks must be a multiple of
PAGE_SIZE. This means that a 64K page kernel must use stacks of at least 64K in
size.
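
Concretely, assuming the standard arm64 page-size options and the 16K minimum
stack size introduced below (illustrative figures, worked from the memory.h
change in this patch):

     4K pages (PAGE_SHIFT = 12): THREAD_SHIFT = 14 -> 16K stacks
    16K pages (PAGE_SHIFT = 14): THREAD_SHIFT = 14 -> 16K stacks
    64K pages (PAGE_SHIFT = 16): THREAD_SHIFT = 16 -> 64K stacks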

To minimize the increase in Image size, IRQ stacks are allocated dynamically at
boot time rather than having the boot CPU's IRQ stack embedded in the kernel image.

This patch was co-authored by Ard Biesheuvel and Mark Rutland.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
(cherry picked from commit e3067861ba6650a566a6273738c23c956ad55c02)

CVE-2017-5753
CVE-2017-5715
CVE-2017-5754

Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
Acked-by: Brad Figg <brad.figg@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/arm64/Kconfig
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/memory.h
arch/arm64/kernel/irq.c
arch/arm64/kernel/vmlinux.lds.S

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index df8f02c42e52fc2d15770797b94b0bcd247b5539..b0066730b78618e5d14a3fa880acb089d5416382 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -92,6 +92,7 @@ config ARM64
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ARCH_VMAP_STACK
        select HAVE_ARM_SMCCC
        select HAVE_EBPF_JIT
        select HAVE_C_RECORDMCOUNT
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 0e8cc3b85bb879d359751cbc4508669b867b2e74..2b1e5def2e49630538f4d97b8f8ead39abfb78da 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -49,7 +49,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
  */
 #define EFI_FDT_ALIGN  SZ_2M   /* used by allocate_new_fdt_and_exit_boot() */
 
-#define EFI_KIMG_ALIGN SEGMENT_ALIGN
+/*
+ * In some configurations (e.g. VMAP_STACK && 64K pages), stacks built into the
+ * kernel need greater alignment than we require the segments to be padded to.
+ */
+#define EFI_KIMG_ALIGN \
+       (SEGMENT_ALIGN > THREAD_ALIGN ? SEGMENT_ALIGN : THREAD_ALIGN)
 
 /* on arm64, the FDT may be located anywhere in system RAM */
 static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
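
To see why the maximum is needed, a rough worked example (the SEGMENT_ALIGN
value is assumed here: typically SZ_64K, or SZ_2M with DEBUG_ALIGN_RODATA).
With 64K pages and VMAP_STACK:

    EFI_KIMG_ALIGN = max(SEGMENT_ALIGN, THREAD_ALIGN)
                   = max(64K, 2 * 64K) = 128K

With 4K or 16K pages, THREAD_ALIGN does not exceed SEGMENT_ALIGN and
EFI_KIMG_ALIGN stays at SEGMENT_ALIGN.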
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 83dc97dde0f512c7b6b5e6213907651055e96fb8..bb5e87374751cafc26201872ddbb1d45bbd0d6cc 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
 #define KASAN_SHADOW_SIZE      (0)
 #endif
 
-#define THREAD_SHIFT           14
+#define MIN_THREAD_SHIFT       14
+
+/*
+ * VMAP'd stacks are allocated at page granularity, so we must ensure that such
+ * stacks are a multiple of page size.
+ */
+#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#define THREAD_SHIFT           PAGE_SHIFT
+#else
+#define THREAD_SHIFT           MIN_THREAD_SHIFT
+#endif
 
 #if THREAD_SHIFT >= PAGE_SHIFT
 #define THREAD_SIZE_ORDER      (THREAD_SHIFT - PAGE_SHIFT)
 
 #define THREAD_SIZE            (UL(1) << THREAD_SHIFT)
 
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN           (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN           THREAD_SIZE
+#endif
+
 #define IRQ_STACK_SIZE         THREAD_SIZE
 
 /*
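
The check described in the THREAD_ALIGN comment above can be illustrated with a
small stand-alone sketch (ordinary user-space C with assumed constants, not
kernel code): every address inside a THREAD_ALIGN-aligned, THREAD_SIZE-sized
stack has bit THREAD_SHIFT clear, so a set bit means the stack pointer has run
off the bottom of the stack.

    /* Stand-alone illustration of the sp & (1 << THREAD_SHIFT) overflow check. */
    #include <assert.h>
    #include <stdint.h>

    #define THREAD_SHIFT 14                     /* assumed: 16K stacks */
    #define THREAD_SIZE  (1UL << THREAD_SHIFT)
    #define THREAD_ALIGN (2 * THREAD_SIZE)      /* VMAP_STACK stack alignment */

    int main(void)
    {
        /* Any THREAD_ALIGN-aligned value stands in for a vmap'd stack base. */
        uintptr_t base = 16 * THREAD_ALIGN;

        /* Every sp within [base, base + THREAD_SIZE) has bit THREAD_SHIFT clear... */
        for (uintptr_t sp = base; sp < base + THREAD_SIZE; sp += 512)
            assert((sp & (1UL << THREAD_SHIFT)) == 0);

        /* ...while an overflowed sp just below the stack has that bit set. */
        assert(((base - 16) & (1UL << THREAD_SHIFT)) != 0);

        return 0;
    }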
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 5141282e47d5821a46cdee8d0f14b9cf93a9ff1d..713561e5bcabc5467e2d86c9d88ec75be2c34a44 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
 
 #include <linux/kernel_stat.h>
 #include <linux/irq.h>
+#include <linux/memory.h>
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 unsigned long irq_err_count;
 
-/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
-DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
 
 int arch_show_interrupts(struct seq_file *p, int prec)
@@ -51,6 +51,31 @@ void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
        handle_arch_irq = handle_irq;
 }
 
+#ifdef CONFIG_VMAP_STACK
+static void init_irq_stacks(void)
+{
+       int cpu;
+       unsigned long *p;
+
+       for_each_possible_cpu(cpu) {
+               /*
+               * To ensure that VMAP'd stack overflow detection works
+               * correctly, the IRQ stacks need to have the same
+               * alignment as other stacks.
+               */
+               p = __vmalloc_node_range(IRQ_STACK_SIZE, THREAD_ALIGN,
+                                        VMALLOC_START, VMALLOC_END,
+                                        THREADINFO_GFP, PAGE_KERNEL,
+                                        0, cpu_to_node(cpu),
+                                        __builtin_return_address(0));
+
+               per_cpu(irq_stack_ptr, cpu) = p;
+       }
+}
+#else
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
+DEFINE_PER_CPU_ALIGNED(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+
 static void init_irq_stacks(void)
 {
        int cpu;
@@ -58,6 +83,7 @@ static void init_irq_stacks(void)
        for_each_possible_cpu(cpu)
                per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
 }
+#endif
 
 void __init init_IRQ(void)
 {
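
A note on the allocation above: __vmalloc_node_range() is used rather than plain
vmalloc() because it lets the IRQ stack be given THREAD_ALIGN alignment and be
allocated on the CPU's local node. A hypothetical sanity check of the invariant
the entry code relies on (not part of this patch, but something init_IRQ() could
call) might look like:

    /*
     * Hypothetical sketch, assuming the irq.c context above: verify that every
     * IRQ stack handed out by init_irq_stacks() is THREAD_ALIGN aligned, so the
     * sp & (1 << THREAD_SHIFT) overflow test is also valid on IRQ stacks.
     */
    static void __init check_irq_stack_alignment(void)
    {
        int cpu;

        for_each_possible_cpu(cpu) {
            unsigned long base = (unsigned long)per_cpu(irq_stack_ptr, cpu);

            WARN_ON(base & (THREAD_ALIGN - 1));
        }
    }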
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 71565386d0631548385ad9dee01cd535277a7021..fe56c268a7d9c37374f8126cae0af243adace762 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -176,7 +176,7 @@ SECTIONS
 
        _data = .;
        _sdata = .;
-       RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+       RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
 
        /*
         * Data written with the MMU off but read with the MMU on requires