arm64: KVM: Use per-CPU vector when BP hardening is enabled
author    Marc Zyngier <marc.zyngier@arm.com>
          Wed, 3 Jan 2018 16:38:35 +0000 (16:38 +0000)
committer Khalid Elmously <khalid.elmously@canonical.com>
          Tue, 27 Feb 2018 16:33:11 +0000 (11:33 -0500)
Commit 6840bdd73d07 upstream.

Now that we have per-CPU vectors, let's plug them into the KVM/arm64 code.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit aab3306701f10c5dc35d8da74431cde6249baf0b)

CVE-2017-5753
CVE-2017-5715
CVE-2017-5754

Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
Acked-by: Brad Figg <brad.figg@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/arm/include/asm/kvm_mmu.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/hyp/switch.c
virt/kvm/arm/arm.c
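
For context when reading the arm64 hunk below: the new kvm_get_hyp_vector() helper consults the per-CPU branch-predictor-hardening state introduced earlier in this series. At the time, that state looked roughly as follows in arch/arm64/include/asm/mmu.h (reproduced here only as a reading aid, not as part of this patch):

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int			hyp_vectors_slot;	/* which 2K slot in __bp_harden_hyp_vecs_start */
	bp_hardening_cb_t	fn;			/* NULL when this CPU needs no hardening */
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}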

arch/arm/include/asm/kvm_mmu.h
index fa6f2174276bdd665519a2cafcaf737e3fda2ce5..eb46fc81a440c3384ff55efe1ab26c43cd6c86db 100644 (file)
@@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return 8;
 }
 
+static inline void *kvm_get_hyp_vector(void)
+{
+       return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+       return 0;
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
arch/arm64/include/asm/kvm_mmu.h
index 672c8684d5c2a796fadae762846c1f314016c7c3..2d6d4bd9de52b48cbaaeae1cea86544c3f406bec 100644 (file)
@@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu.h>
+
+static inline void *kvm_get_hyp_vector(void)
+{
+       struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+       void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+
+       if (data->fn) {
+               vect = __bp_harden_hyp_vecs_start +
+                      data->hyp_vectors_slot * SZ_2K;
+
+               if (!has_vhe())
+                       vect = lm_alias(vect);
+       }
+
+       return vect;
+}
+
+static inline int kvm_map_vectors(void)
+{
+       return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+                                  kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+                                  PAGE_HYP_EXEC);
+}
+
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+       return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+       return 0;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
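
A note on the !has_vhe() branch above: without VHE, EL2 does not run from kernel-image addresses, so the chosen slot address has to be converted to its linear-map alias before it is handed to the hyp mapping code, mirroring what kvm_ksym_ref() already does for the default vector. For reference, the two helpers involved look approximately like this at the time of this series (not part of the patch):

/* include/linux/mm.h */
#define lm_alias(x)	__va(__pa_symbol(x))

/* arch/arm64/include/asm/kvm_asm.h */
#define kvm_ksym_ref(sym)						\
	({								\
		void *val = &sym;					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias(&sym);				\
		val;							\
	})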
arch/arm64/kvm/hyp/switch.c
index 945e79c641c4a2b69dc0a1b97c5b68c5f99d6bc0..9b2a76d783eb8ef95efeeb474e995547d22033c8 100644 (file)
@@ -51,7 +51,7 @@ static void __hyp_text __activate_traps_vhe(void)
        val &= ~CPACR_EL1_FPEN;
        write_sysreg(val, cpacr_el1);
 
-       write_sysreg(__kvm_hyp_vector, vbar_el1);
+       write_sysreg(kvm_get_hyp_vector(), vbar_el1);
 }
 
 static void __hyp_text __activate_traps_nvhe(void)
virt/kvm/arm/arm.c
index a39a1e161e63d53f5e199189c0427a733b2e9544..4de94f8da84ef2994b0f6ca750458de2ec6d8365 100644 (file)
@@ -1136,7 +1136,7 @@ static void cpu_init_hyp_mode(void *dummy)
        pgd_ptr = kvm_mmu_get_httbr();
        stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
        hyp_stack_ptr = stack_page + PAGE_SIZE;
-       vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+       vector_ptr = (unsigned long)kvm_get_hyp_vector();
 
        __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
        __cpu_init_stage2();
@@ -1390,6 +1390,12 @@ static int init_hyp_mode(void)
                goto out_err;
        }
 
+       err = kvm_map_vectors();
+       if (err) {
+               kvm_err("Cannot map vectors\n");
+               goto out_err;
+       }
+
        /*
         * Map the Hyp stack pages
         */