KVM: MIPS: Precalculate MMIO load resume PC
author James Hogan <james.hogan@imgtec.com>
Tue, 25 Oct 2016 15:11:12 +0000 (16:11 +0100)
committer Luis Henriques <luis.henriques@canonical.com>
Tue, 6 Dec 2016 16:52:54 +0000 (16:52 +0000)
BugLink: http://bugs.launchpad.net/bugs/1642968
commit e1e575f6b026734be3b1f075e780e91ab08ca541 upstream.

The advancing of the PC when completing an MMIO load is done before
re-entering the guest, i.e. before restoring the guest ASID. However, if
the load is in a branch delay slot it may need to access guest code to
read the prior branch instruction. This isn't safe in TLB mapped code at
the moment, nor in the future when we'll access unmapped guest segments
using direct user accessors too, as it could read the branch from host
user memory instead.

Therefore, calculate the resume PC in advance while we're still in the
right context and save it in the new vcpu->arch.io_pc (replacing the no
longer needed vcpu->arch.pending_load_cause), and restore it on MMIO
completion.

Fixes: e685c689f3a8 ("KVM/MIPS32: Privileged instruction/target branch emulation.")
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[james.hogan@imgtec.com: Backport to 3.18..4.4]
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
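
The resulting flow can be illustrated with a small standalone sketch: the
resume PC is computed at emulation time, while guest context (and hence the
prior branch instruction) is still safely accessible, stashed away, and then
simply restored when the MMIO load completes. The struct below and the
update_pc() stub are deliberately simplified stand-ins for the real
kvm_vcpu_arch and arch/mips/kvm/emulate.c code, not the actual
implementation:

#include <stdio.h>

#define CAUSEF_BD (1u << 31)          /* load sat in a branch delay slot */

struct vcpu_arch {
        unsigned long pc;             /* current guest PC */
        unsigned long io_pc;          /* PC to resume at after MMIO completes */
};

/*
 * Stand-in for update_pc(): advance pc past the faulting instruction.
 * In the real code a delay-slot load requires reading the prior branch
 * instruction from guest memory, which is only safe while the guest
 * context (ASID) is still live; here it is crudely simplified.
 */
static int update_pc(struct vcpu_arch *arch, unsigned int cause)
{
        if (cause & CAUSEF_BD)
                arch->pc += 8;        /* simplified: skip branch + delay slot */
        else
                arch->pc += 4;
        return 0;
}

/* Emulation time: precompute the resume PC while access is still safe. */
static int emulate_load(struct vcpu_arch *arch, unsigned int cause)
{
        unsigned long curr_pc = arch->pc;

        if (update_pc(arch, cause))
                return -1;
        arch->io_pc = arch->pc;       /* remember where to resume */
        arch->pc = curr_pc;           /* leave pc untouched until completion */
        return 0;
}

/* MMIO completion time: no guest memory access is needed any more. */
static void complete_mmio_load(struct vcpu_arch *arch)
{
        arch->pc = arch->io_pc;
}

int main(void)
{
        struct vcpu_arch arch = { .pc = 0x80001000 };

        emulate_load(&arch, CAUSEF_BD);   /* load in a branch delay slot */
        complete_mmio_load(&arch);
        printf("resumed at 0x%lx\n", arch.pc);
        return 0;
}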
arch/mips/include/asm/kvm_host.h
arch/mips/kvm/emulate.c

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index dd7cee7957097009ab08f14841a4f220e756bc3d..c8c04a1f1c9f97686db1b77026ade2604ce9db92 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -400,7 +400,10 @@ struct kvm_vcpu_arch {
        /* Host KSEG0 address of the EI/DI offset */
        void *kseg0_commpage;
 
-       u32 io_gpr;             /* GPR used as IO source/target */
+       /* Resume PC after MMIO completion */
+       unsigned long io_pc;
+       /* GPR used as IO source/target */
+       u32 io_gpr;
 
        struct hrtimer comparecount_timer;
        /* Count timer control KVM register */
@@ -422,8 +425,6 @@ struct kvm_vcpu_arch {
        /* Bitmask of pending exceptions to be cleared */
        unsigned long pending_exceptions_clr;
 
-       unsigned long pending_load_cause;
-
        /* Save/Restore the entryhi register when are are preempted/scheduled back in */
        unsigned long preempt_entryhi;
 
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 4298aeb1e20f3ef9ebc2b477d411ffc0cdbf35ef..4c85ab808f9935cb76434d54555ce16f7c192368 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1473,6 +1473,7 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
                                            struct kvm_vcpu *vcpu)
 {
        enum emulation_result er = EMULATE_DO_MMIO;
+       unsigned long curr_pc;
        int32_t op, base, rt, offset;
        uint32_t bytes;
 
@@ -1481,7 +1482,18 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
        offset = inst & 0xffff;
        op = (inst >> 26) & 0x3f;
 
-       vcpu->arch.pending_load_cause = cause;
+       /*
+        * Find the resume PC now while we have safe and easy access to the
+        * prior branch instruction, and save it for
+        * kvm_mips_complete_mmio_load() to restore later.
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+       vcpu->arch.io_pc = vcpu->arch.pc;
+       vcpu->arch.pc = curr_pc;
+
        vcpu->arch.io_gpr = rt;
 
        switch (op) {
@@ -2461,9 +2473,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
                goto done;
        }
 
-       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
-       if (er == EMULATE_FAIL)
-               return er;
+       /* Restore saved resume PC */
+       vcpu->arch.pc = vcpu->arch.io_pc;
 
        switch (run->mmio.len) {
        case 4:
@@ -2485,11 +2496,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
                break;
        }
 
-       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-               kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-                         vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-                         vcpu->mmio_needed);
-
 done:
        return er;
 }