arm64: Add skeleton to harden the branch predictor against aliasing attacks
Author:     Will Deacon <will.deacon@arm.com>
AuthorDate: Wed, 3 Jan 2018 11:17:58 +0000
Committer:  Khalid Elmously <khalid.elmously@canonical.com>
CommitDate: Tue, 27 Feb 2018 16:33:10 +0000 (11:33 -0500)
Commit 0f15adbb2861 upstream.

Aliasing attacks against CPU branch predictors can allow an attacker to
redirect speculative control flow on some CPUs and potentially divulge
information from one context to another.

This patch adds initial skeleton code behind a new Kconfig option to
enable implementation-specific mitigations against these attacks for
CPUs that are affected.

Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit 5bee81c980297f3f5486539881ab4241c5f0dea3)

CVE-2017-5753
CVE-2017-5715
CVE-2017-5754

Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
Acked-by: Brad Figg <brad.figg@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/arm64/Kconfig
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/bpi.S [new file with mode: 0644]
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/mm/context.c
arch/arm64/mm/fault.c
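
In outline, the skeleton keeps a per-CPU bp_hardening_data record holding an optional CPU-specific invalidation callback (plus, under KVM, a hypervisor vector slot); the hot paths reduce to one capability test and an indirect call. As a condensed, non-verbatim orientation aid for the hunks that follow (the real definition lives in the asm/mmu.h hunk, and the hook is invoked from post_ttbr_update_workaround() in mm/context.c and do_el0_ia_bp_hardening() in mm/fault.c):

static inline void arm64_apply_bp_hardening(void)
{
        struct bp_hardening_data *d;

        /* Compiles down to a static-key branch once CPU capabilities are finalised. */
        if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
                return;

        d = this_cpu_ptr(&bp_hardening_data);   /* per-CPU record */
        if (d->fn)
                d->fn();                        /* CPU-specific invalidation sequence */
}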

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 21d07f16be1b5381bfc749713b9129c9f5c9c61b..809d81914164e430c0898707b899ba4a5634efa7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -836,6 +836,23 @@ config UNMAP_KERNEL_AT_EL0
 
          If unsure, say Y.
 
+config HARDEN_BRANCH_PREDICTOR
+       bool "Harden the branch predictor against aliasing attacks" if EXPERT
+       default y
+       help
+         Speculation attacks against some high-performance processors rely on
+         being able to manipulate the branch predictor for a victim context by
+         executing aliasing branches in the attacker context.  Such attacks
+         can be partially mitigated against by clearing internal branch
+         predictor state and limiting the prediction logic in some situations.
+
+         This config option will take CPU-specific actions to harden the
+         branch predictor against aliasing attacks and may rely on specific
+         instruction sequences or control bits being set by the system
+         firmware.
+
+         If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
        bool "Emulate deprecated/obsolete ARMv8 instructions"
        depends on COMPAT
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 6835a48b31d4a48781ad40968974327a5793b25b..8498eb4ccfc252de3bb0b7b3f0d9e50226458a9a 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -41,7 +41,8 @@
 #define ARM64_WORKAROUND_CAVIUM_30115          20
 #define ARM64_HAS_DCPOP                                21
 #define ARM64_UNMAP_KERNEL_AT_EL0              23
+#define ARM64_HARDEN_BRANCH_PREDICTOR          24
 
-#define ARM64_NCAPS                            24
+#define ARM64_NCAPS                            25
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 6f7bdb89817ffeb2a81cccaa512c60bde19d1489..6dd83d75b82ab8b9808f8b60b3ac6252344222a8 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -41,6 +41,43 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
               cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+       int                     hyp_vectors_slot;
+       bp_hardening_cb_t       fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+       return this_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+       struct bp_hardening_data *d;
+
+       if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+               return;
+
+       d = arm64_get_bp_hardening_data();
+       if (d->fn)
+               d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+       return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void)      { }
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1efbe29c8b248ef8aae97e06f81ab0a5c4bb5b69..ede80d47d0effdb749a51df45d39dd576611fc9c 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
 
 /* id_aa64pfr0 */
 #define ID_AA64PFR0_CSV3_SHIFT         60
+#define ID_AA64PFR0_CSV2_SHIFT         56
 #define ID_AA64PFR0_GIC_SHIFT          24
 #define ID_AA64PFR0_ASIMD_SHIFT                20
 #define ID_AA64PFR0_FP_SHIFT           16
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index f2b4e816b6dec5d55d677ca55f9764ad4dcbbb0d..0b7eb05e7244af4c4dc2106aa6b92ae598d7fd6d 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -54,6 +54,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)         += crash_dump.o
 
+ifeq ($(CONFIG_KVM),y)
+arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)    += bpi.o
+endif
+
 obj-y                                  += $(arm64-obj-y) vdso/ probes/
 obj-m                                  += $(arm64-obj-m)
 head-y                                 := head.o
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
new file mode 100644
index 0000000..06a931e
--- /dev/null
+++ b/arch/arm64/kernel/bpi.S
@@ -0,0 +1,55 @@
+/*
+ * Contains CPU specific branch predictor invalidation sequences
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+.macro ventry target
+       .rept 31
+       nop
+       .endr
+       b       \target
+.endm
+
+.macro vectors target
+       ventry \target + 0x000
+       ventry \target + 0x080
+       ventry \target + 0x100
+       ventry \target + 0x180
+
+       ventry \target + 0x200
+       ventry \target + 0x280
+       ventry \target + 0x300
+       ventry \target + 0x380
+
+       ventry \target + 0x400
+       ventry \target + 0x480
+       ventry \target + 0x500
+       ventry \target + 0x580
+
+       ventry \target + 0x600
+       ventry \target + 0x680
+       ventry \target + 0x700
+       ventry \target + 0x780
+.endm
+
+       .align  11
+ENTRY(__bp_harden_hyp_vecs_start)
+       .rept 4
+       vectors __kvm_hyp_vector
+       .endr
+ENTRY(__bp_harden_hyp_vecs_end)
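
Sizing note: each ventry expands to 31 NOPs plus a branch, i.e. 32 A64 instructions of 4 bytes each, giving the 0x80-byte stride that matches the architectural spacing of exception vector entries. A vectors block is thus 16 x 0x80 = 0x800 bytes (SZ_2K), and the .rept 4 lays out four such 2 KB slots; the .align 11 supplies the 2 KB alignment required of a vector base address. These are exactly the constants __copy_hyp_vect_bpi() in cpu_errata.c below depends on: it copies a hardening sequence into slot * SZ_2K in 0x80-byte steps, and its BUG_ON refuses to allocate beyond the four slots reserved here.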
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 624f8a1180291ce1f6a970dfff72013d4bd54465..3f2fee9d4590ef04ecb7214382d13addd79e6c59 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -60,6 +60,80 @@ static int cpu_enable_trap_ctr_access(void *__unused)
        return 0;
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+                               const char *hyp_vecs_end)
+{
+       void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+       int i;
+
+       for (i = 0; i < SZ_2K; i += 0x80)
+               memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+       flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                     const char *hyp_vecs_start,
+                                     const char *hyp_vecs_end)
+{
+       static int last_slot = -1;
+       static DEFINE_SPINLOCK(bp_lock);
+       int cpu, slot = -1;
+
+       spin_lock(&bp_lock);
+       for_each_possible_cpu(cpu) {
+               if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+                       slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+                       break;
+               }
+       }
+
+       if (slot == -1) {
+               last_slot++;
+               BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+                       / SZ_2K) <= last_slot);
+               slot = last_slot;
+               __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+       }
+
+       __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+       __this_cpu_write(bp_hardening_data.fn, fn);
+       spin_unlock(&bp_lock);
+}
+#else
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                     const char *hyp_vecs_start,
+                                     const char *hyp_vecs_end)
+{
+       __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+                                    bp_hardening_cb_t fn,
+                                    const char *hyp_vecs_start,
+                                    const char *hyp_vecs_end)
+{
+       u64 pfr0;
+
+       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+               return;
+
+       pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+       if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+               return;
+
+       __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+}
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 #define MIDR_RANGE(model, min, max) \
        .def_scope = SCOPE_LOCAL_CPU, \
        .matches = is_affected_midr_range, \
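
This patch deliberately adds no users of install_bp_hardening_cb(); affected CPUs are wired up by follow-up patches. Purely as an illustration (every "hypothetical" name below is invented for this sketch and defined nowhere in the tree), an errata entry for an affected part could take roughly this shape, reusing the MIDR_RANGE() helper whose definition begins above and the void * .enable convention visible in cpu_enable_trap_ctr_access():

/* Hypothetical implementation-specific invalidation sequence. */
static void hypothetical_bp_inval(void)
{
        /* e.g. an IMPLEMENTATION DEFINED sysreg write or a firmware call */
}

static int enable_hypothetical_bp_hardening(void *data)
{
        const struct arm64_cpu_capabilities *entry = data;

        install_bp_hardening_cb(entry, hypothetical_bp_inval,
                                __hypothetical_hyp_vecs_start,  /* a bpi.S sequence */
                                __hypothetical_hyp_vecs_end);
        return 0;
}

/* ...and a matching entry in the arm64_errata[] table: */
{
        .desc = "Branch predictor hardening (hypothetical CPU)",
        .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
        MIDR_RANGE(MIDR_HYPOTHETICAL, 0x00, 0x01),
        .enable = enable_hypothetical_bp_hardening,
},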
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c284b855315f6e33ff902ffcfa8b81991117ef5e..582142ae92e17e180605a5538e5d8b84f23b2f37 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -126,6 +126,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index fd695e860ecb2370952fd5975faf3a0d10bb277a..d88eeb132d2d8270c512cfbda26f9694f33a2f0f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -726,13 +726,15 @@ el0_ia:
         * Instruction abort handling
         */
        mrs     x26, far_el1
-       // enable interrupts before calling the main handler
-       enable_dbg_and_irq
+       enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_off
+#endif
        ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
-       bl      do_mem_abort
+       bl      do_el0_ia_bp_hardening
        b       ret_to_user
 el0_fpsimd_acc:
        /*
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 23498d032c820821a515f7f483861594b394b447..abc56dc31ae06e0baa27e305b4a1f2077d746501 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -242,6 +242,8 @@ asmlinkage void post_ttbr_update_workaround(void)
                        "ic iallu; dsb nsh; isb",
                        ARM64_WORKAROUND_CAVIUM_27456,
                        CONFIG_CAVIUM_ERRATUM_27456));
+
+       arm64_apply_bp_hardening();
 }
 
 static int asids_init(void)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index addfb8c2e349f86b737454697a4ec8f1c0e4b794..aa542869c55491a6f69259b3bb8ed710d56806fa 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -704,6 +704,23 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
        arm64_notify_die("", regs, &info, esr);
 }
 
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+                                                  unsigned int esr,
+                                                  struct pt_regs *regs)
+{
+       /*
+        * We've taken an instruction abort from userspace and not yet
+        * re-enabled IRQs. If the address is a kernel address, apply
+        * BP hardening prior to enabling IRQs and pre-emption.
+        */
+       if (addr > TASK_SIZE)
+               arm64_apply_bp_hardening();
+
+       local_irq_enable();
+       do_mem_abort(addr, esr, regs);
+}
+
+
 /*
  * Handle stack alignment exceptions.
  */