KVM: arm64: Avoid storing the vcpu pointer on the stack
author     Christoffer Dall <christoffer.dall@linaro.org>
           Fri, 20 Jul 2018 09:52:58 +0000 (10:52 +0100)
committer  Kleber Sacilotto de Souza <kleber.souza@canonical.com>
           Wed, 5 Sep 2018 12:53:31 +0000 (14:53 +0200)
BugLink: https://bugs.launchpad.net/bugs/1787993
CVE-2018-3639 (arm64)

Commit 4464e210de9e80e38de59df052fe09ea2ff80b1b upstream.

We already have the percpu area for the host cpu state, which points to
the VCPU, so there's no need to store the VCPU pointer on the stack on
every context switch.  We can be a little more clever and just use
tpidr_el2 for the percpu offset and load the VCPU pointer from the host
context.
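
Conceptually, the exit path can now find everything through the
per-cpu host context instead of the stack.  A rough C sketch of what
the new get_host_ctxt/get_vcpu_ptr assembly macros (added below)
compute, assuming code running at EL2 where &kvm_host_cpu_state is
generated PC-relative:

        /* sketch only; the real implementation is the asm macros */
        struct kvm_cpu_context *host_ctxt = (struct kvm_cpu_context *)
                ((u64)&kvm_host_cpu_state + read_sysreg(tpidr_el2));
        struct kvm_vcpu *vcpu = kern_hyp_va(host_ctxt->__hyp_running_vcpu);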

This has the benefit of being able to retrieve the host context even
when our stack is corrupted, and it has a potential performance benefit
because we trade a store plus a load for an mrs and a load on a round
trip to the guest.

This does require us to calculate the percpu offset without including
the offset from the kernel mapping of the percpu array to the linear
mapping of the array (which is what we store in tpidr_el1), because a
PC-relative generated address in EL2 is already giving us the hyp alias
of the linear mapping of a kernel address.  We do this in
__cpu_init_hyp_mode() by using kvm_ksym_ref().
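
Concretely, the value written to tpidr_el2 is just the distance from
the kernel-image address of the per-cpu array to this CPU's copy,
with no linear-map translation folded in, exactly as the
__cpu_init_hyp_mode() hunk below computes:

        tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
                  - (u64)kvm_ksym_ref(kvm_host_cpu_state);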

The code that accesses ESR_EL2 was previously using an alternative to
use the _EL1 accessor on VHE systems, but this was actually unnecessary
as the _EL1 accessor aliases the ESR_EL2 register on VHE, and the _EL2
accessor does the same thing on both systems.
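
In C terms the simplification amounts to (a sketch using the generic
esr.h helper; the patch itself does this in assembly):

        u32 ec = ESR_ELx_EC(read_sysreg(esr_el2)); /* valid at EL2 on VHE and non-VHE */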

Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit 2cdc2e62a6ac829832c2bf4ccb1098fccc67f82c
 https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-4.14.y)
Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/sysreg-sr.c

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 24961b732e654893af57b3c511e875f797b4cf28..7149f15203826d6e38ecd586cfc4ef5be523baaa 100644
@@ -33,6 +33,7 @@
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT    0
 #define KVM_ARM64_DEBUG_DIRTY          (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
 
+/* Translate a kernel address of @sym into its equivalent linear mapping */
 #define kvm_ksym_ref(sym)                                              \
        ({                                                              \
                void *val = &sym;                                       \
@@ -70,6 +71,20 @@ extern u32 __init_stage2_translation(void);
 
 extern void __qcom_hyp_sanitize_btac_predictors(void);
 
+#else /* __ASSEMBLY__ */
+
+.macro get_host_ctxt reg, tmp
+       adr_l   \reg, kvm_host_cpu_state
+       mrs     \tmp, tpidr_el2
+       add     \reg, \reg, \tmp
+.endm
+
+.macro get_vcpu_ptr vcpu, ctxt
+       get_host_ctxt \ctxt, \vcpu
+       ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+       kern_hyp_va     \vcpu
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 448d3b9a58cb0605472fb4590a8e8fc592c5ad49..3100428bbee12378f311a8fe1b2f4155227cfd22 100644
@@ -359,10 +359,15 @@ int kvm_perf_teardown(void);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
+void __kvm_set_tpidr_el2(u64 tpidr_el2);
+DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
                                       unsigned long hyp_stack_ptr,
                                       unsigned long vector_ptr)
 {
+       u64 tpidr_el2;
+
        /*
         * Call initialization code, and switch to the full blown HYP code.
         * If the cpucaps haven't been finalized yet, something has gone very
@@ -371,6 +376,16 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
         */
        BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
        __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
+
+       /*
+        * Calculate the raw per-cpu offset without a translation from the
+        * kernel's mapping to the linear mapping, and store it in tpidr_el2
+        * so that we can use adr_l to access per-cpu variables in EL2.
+        */
+       tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
+               - (u64)kvm_ksym_ref(kvm_host_cpu_state);
+
+       kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
 }
 
 static inline void kvm_arch_hardware_unsetup(void) {}
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 1303e04110cdb18f1a3bd722d2bd704e4c0e6032..78e1b0a70aaf37a4bb14cc24de36e1801fedc56f 100644
@@ -138,6 +138,7 @@ int main(void)
   DEFINE(CPU_FP_REGS,          offsetof(struct kvm_regs, fp_regs));
   DEFINE(VCPU_FPEXC32_EL2,     offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
   DEFINE(VCPU_HOST_CONTEXT,    offsetof(struct kvm_vcpu, arch.host_cpu_context));
+  DEFINE(HOST_CONTEXT_VCPU,    offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
 #endif
 #ifdef CONFIG_CPU_PM
   DEFINE(CPU_SUSPEND_SZ,       sizeof(struct cpu_suspend_ctx));
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fdd1068ee3a59e0dc7aac3842084b7a5a38e2099..1f458f7c3b44066966750da2b54f9134f43bd9fb 100644
@@ -62,9 +62,6 @@ ENTRY(__guest_enter)
        // Store the host regs
        save_callee_saved_regs x1
 
-       // Store host_ctxt and vcpu for use at exit time
-       stp     x1, x0, [sp, #-16]!
-
        add     x18, x0, #VCPU_CONTEXT
 
        // Restore guest regs x0-x17
@@ -118,8 +115,7 @@ ENTRY(__guest_exit)
        // Store the guest regs x19-x29, lr
        save_callee_saved_regs x1
 
-       // Restore the host_ctxt from the stack
-       ldr     x2, [sp], #16
+       get_host_ctxt   x2, x3
 
        // Now restore the host regs
        restore_callee_saved_regs x2
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index f36464bd57c5f23f2fef85a51efe9c4b033874f1..82fbc368f7382f48addb25862aca8b75f16a35fd 100644
@@ -57,13 +57,8 @@ ENDPROC(__vhe_hyp_call)
 el1_sync:                              // Guest trapped into EL2
        stp     x0, x1, [sp, #-16]!
 
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       mrs     x1, esr_el2
-alternative_else
-       mrs     x1, esr_el1
-alternative_endif
-       lsr     x0, x1, #ESR_ELx_EC_SHIFT
-
+       mrs     x0, esr_el2
+       lsr     x0, x0, #ESR_ELx_EC_SHIFT
        cmp     x0, #ESR_ELx_EC_HVC64
        ccmp    x0, #ESR_ELx_EC_HVC32, #4, ne
        b.ne    el1_trap
@@ -117,10 +112,14 @@ el1_hvc_guest:
        eret
 
 el1_trap:
+       get_vcpu_ptr    x1, x0
+
+       mrs             x0, esr_el2
+       lsr             x0, x0, #ESR_ELx_EC_SHIFT
        /*
         * x0: ESR_EC
+        * x1: vcpu pointer
         */
-       ldr     x1, [sp, #16 + 8]       // vcpu stored by __guest_enter
 
        /*
         * We trap the first access to the FP/SIMD to save the host context
@@ -138,13 +137,13 @@ alternative_else_nop_endif
 
 el1_irq:
        stp     x0, x1, [sp, #-16]!
-       ldr     x1, [sp, #16 + 8]
+       get_vcpu_ptr    x1, x0
        mov     x0, #ARM_EXCEPTION_IRQ
        b       __guest_exit
 
 el1_error:
        stp     x0, x1, [sp, #-16]!
-       ldr     x1, [sp, #16 + 8]
+       get_vcpu_ptr    x1, x0
        mov     x0, #ARM_EXCEPTION_EL1_SERROR
        b       __guest_exit
 
@@ -180,14 +179,7 @@ ENTRY(__hyp_do_panic)
 ENDPROC(__hyp_do_panic)
 
 ENTRY(__hyp_panic)
-       /*
-        * '=kvm_host_cpu_state' is a host VA from the constant pool, it may
-        * not be accessible by this address from EL2, hyp_panic() converts
-        * it with kern_hyp_va() before use.
-        */
-       ldr     x0, =kvm_host_cpu_state
-       mrs     x1, tpidr_el2
-       add     x0, x0, x1
+       get_host_ctxt x0, x1
        b       hyp_panic
 ENDPROC(__hyp_panic)
 
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 9739fc805ca1a6eccf6d80ecbeddf86e8e4707c7..31b359b12ff5e97bebee80102af317ffc5ba7990 100644
@@ -472,7 +472,7 @@ static hyp_alternate_select(__hyp_call_panic,
                            __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);
 
-void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
+void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 {
        struct kvm_vcpu *vcpu = NULL;
 
@@ -481,9 +481,6 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
        u64 par = read_sysreg(par_el1);
 
        if (read_sysreg(vttbr_el2)) {
-               struct kvm_cpu_context *host_ctxt;
-
-               host_ctxt = kern_hyp_va(__host_ctxt);
                vcpu = host_ctxt->__hyp_running_vcpu;
                __timer_disable_traps(vcpu);
                __deactivate_traps(vcpu);
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 2c17afd2be96b8fe7774de732fbe1717d614da24..43b7dd65e3e685504f2ea33ee81e1cd6754b62f4 100644
@@ -189,3 +189,8 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
        if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
                write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
 }
+
+void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
+{
+       asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
+}