]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
KVM: arm/arm64: mask/unmask daif around VHE guests
authorJames Morse <james.morse@arm.com>
Mon, 15 Jan 2018 19:39:00 +0000 (19:39 +0000)
committerSeth Forshee <seth.forshee@canonical.com>
Fri, 16 Mar 2018 15:45:29 +0000 (10:45 -0500)
BugLink: http://bugs.launchpad.net/bugs/1756096
Non-VHE systems take an exception to EL2 in order to world-switch into the
guest. When returning from the guest KVM implicitly restores the DAIF
flags when it returns to the kernel at EL1.

With VHE none of this exception-level jumping happens, so KVM's
world-switch code is exposed to the host kernel's DAIF values, and KVM
spills the guest-exit DAIF values back into the host kernel.
On entry to a guest we have Debug and SError exceptions unmasked, KVM
has switched VBAR but isn't prepared to handle these. On guest exit
Debug exceptions are left disabled once we return to the host and will
stay this way until we enter user space.

Add a helper to mask/unmask DAIF around VHE guests. The unmask can only
happen after the host's VBAR value has been synchronised by the isb in
__vhe_hyp_call (via kvm_call_hyp()). Masking could be as late as
setting KVM's VBAR value, but is kept here for symmetry.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(backported from commit 4f5abad9e826bd579b0661efa32682d9c9bc3fa8)
Signed-off-by: Manoj Iyer <manoj.iyer@canonical.com>
Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
arch/arm/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_host.h
virt/kvm/arm/arm.c

index fdd9da1555bea7199375403b7edf733ed9a979f4..9d43d9d27884b87ee27990ccbec1a8b189450ff0 100644 (file)
@@ -301,6 +301,9 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 /* All host FP/SIMD state is restored on guest exit, so nothing to save: */
 static inline void kvm_fpsimd_flush_cpu_state(void) {}
 
+static inline void kvm_arm_vhe_guest_enter(void) {}
+static inline void kvm_arm_vhe_guest_exit(void) {}
+
 static inline bool kvm_arm_harden_branch_predictor(void)
 {
        /* No way to detect it yet, pretend it is not there. */
index 5466710a37805068ceb821e0bb1715f708a73649..f1882bd70e327a0201ec56e882e5b03752ce3c52 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
 #include <asm/cpufeature.h>
+#include <asm/daifflags.h>
 #include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
@@ -398,6 +399,16 @@ static inline void kvm_fpsimd_flush_cpu_state(void)
                sve_flush_cpu_state();
 }
 
+static inline void kvm_arm_vhe_guest_enter(void)
+{
+       local_daif_mask();
+}
+
+static inline void kvm_arm_vhe_guest_exit(void)
+{
+       local_daif_restore(DAIF_PROCCTX_NOIRQ);
+}
+
 static inline bool kvm_arm_harden_branch_predictor(void)
 {
        return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
index c2e055eb5e1847979862abf2a06a3268d4d60641..f065cfe1c1db821f0dc792cc043fc6000cbecd41 100644 (file)
@@ -704,9 +704,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 */
                trace_kvm_entry(*vcpu_pc(vcpu));
                guest_enter_irqoff();
+               if (has_vhe())
+                       kvm_arm_vhe_guest_enter();
 
                ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
+               if (has_vhe())
+                       kvm_arm_vhe_guest_exit();
                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->stat.exits++;
                /*