KVM: x86: Add kvm_skip_emulated_instruction and use it.
Author:     Kyle Huey <me@kylehuey.com>
AuthorDate: Tue, 29 Nov 2016 20:40:40 +0000 (12:40 -0800)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Thu, 8 Dec 2016 14:31:05 +0000 (15:31 +0100)
kvm_skip_emulated_instruction calls both
kvm_x86_ops->skip_emulated_instruction and kvm_vcpu_check_singlestep,
skipping the emulated instruction and generating a trap if necessary.
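
In essence, the new helper (added in the x86.c hunk below) combines the
two steps and uses its return value to report whether a singlestep exit
to userspace is now pending; annotated sketch of that same code:

    int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
    {
            unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
            int r = EMULATE_DONE;

            /* Advance RIP past the emulated instruction. */
            kvm_x86_ops->skip_emulated_instruction(vcpu);
            /* May flip r to EMULATE_USER_EXIT and queue KVM_EXIT_DEBUG. */
            kvm_vcpu_check_singlestep(vcpu, rflags, &r);

            /* 1: keep running the guest; 0: a debug exit is pending. */
            return r == EMULATE_DONE;
    }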

Replacing skip_emulated_instruction calls with
kvm_skip_emulated_instruction is straightforward, except for:

- ICEBP, which is already inside a trap, so avoid triggering another trap.
- Instructions that can trigger exits to userspace, such as the IO insns,
  MOVs to CR8, and HALT. If kvm_skip_emulated_instruction does trigger a
  KVM_GUESTDBG_SINGLESTEP exit, and the handling code for
  IN/OUT/MOV CR8/HALT also triggers an exit to userspace, the latter
  takes precedence; the singlestep is then triggered again on the next
  instruction, which matches the current behavior (see the sketch after
  this list).
- Task switch instructions, which would require additional handling (e.g.
  the task switch bit) and are instead left alone.
- Cases where VMLAUNCH/VMRESUME do not proceed to the next instruction,
  which do not trigger singlestep traps as mentioned previously.
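
For the userspace-exit cases in the second bullet, the conversion keeps
both results, e.g. (mirroring the handle_io hunk below; size and port
are locals of that handler):

    int ret = kvm_skip_emulated_instruction(vcpu);

    /*
     * If the handler also wants to exit to userspace (returns 0), that
     * exit takes precedence over a pending KVM_EXIT_DEBUG; the
     * singlestep trap simply fires again on the next instruction.
     */
    return kvm_fast_pio_out(vcpu, size, port) && ret;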

Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/cpuid.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 80bad5c372bf6c1597f61e3bd2b5161a5810eb7e..8d15870928517d7359a2b5bae372bf0e20cfd716 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1368,7 +1368,8 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 07cc629555204f913f4a6306ff7a70d62aeffb2b..dc2685e3f8ea01f9272b135207e598ba2b018685 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -890,7 +890,6 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
        kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5bdffcd781f55fdaf01e17716ae98e2e60b61776..08a4d3ab3455734a669ffe0326450f4d0fb34e55 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3151,8 +3151,7 @@ static int skinit_interception(struct vcpu_svm *svm)
 
 static int wbinvd_interception(struct vcpu_svm *svm)
 {
-       kvm_emulate_wbinvd(&svm->vcpu);
-       return 1;
+       return kvm_emulate_wbinvd(&svm->vcpu);
 }
 
 static int xsetbv_interception(struct vcpu_svm *svm)
@@ -3275,9 +3274,7 @@ static int rdpmc_interception(struct vcpu_svm *svm)
                return emulate_on_interception(svm);
 
        err = kvm_rdpmc(&svm->vcpu);
-       kvm_complete_insn_gp(&svm->vcpu, err);
-
-       return 1;
+       return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
@@ -3374,9 +3371,7 @@ static int cr_interception(struct vcpu_svm *svm)
                }
                kvm_register_write(&svm->vcpu, reg, val);
        }
-       kvm_complete_insn_gp(&svm->vcpu, err);
-
-       return 1;
+       return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static int dr_interception(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f4f6304f9583a24527264fa254ad14e7e4587a37..16a144d220330b0ea59b521d18c11a6b018cbc95 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5556,7 +5556,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu)
 static int handle_io(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
-       int size, in, string;
+       int size, in, string, ret;
        unsigned port;
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -5570,9 +5570,14 @@ static int handle_io(struct kvm_vcpu *vcpu)
 
        port = exit_qualification >> 16;
        size = (exit_qualification & 7) + 1;
-       skip_emulated_instruction(vcpu);
 
-       return kvm_fast_pio_out(vcpu, size, port);
+       ret = kvm_skip_emulated_instruction(vcpu);
+
+       /*
+        * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
+        * KVM_EXIT_DEBUG here.
+        */
+       return kvm_fast_pio_out(vcpu, size, port) && ret;
 }
 
 static void
@@ -5670,6 +5675,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
        int cr;
        int reg;
        int err;
+       int ret;
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        cr = exit_qualification & 15;
@@ -5681,25 +5687,27 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                switch (cr) {
                case 0:
                        err = handle_set_cr0(vcpu, val);
-                       kvm_complete_insn_gp(vcpu, err);
-                       return 1;
+                       return kvm_complete_insn_gp(vcpu, err);
                case 3:
                        err = kvm_set_cr3(vcpu, val);
-                       kvm_complete_insn_gp(vcpu, err);
-                       return 1;
+                       return kvm_complete_insn_gp(vcpu, err);
                case 4:
                        err = handle_set_cr4(vcpu, val);
-                       kvm_complete_insn_gp(vcpu, err);
-                       return 1;
+                       return kvm_complete_insn_gp(vcpu, err);
                case 8: {
                                u8 cr8_prev = kvm_get_cr8(vcpu);
                                u8 cr8 = (u8)val;
                                err = kvm_set_cr8(vcpu, cr8);
-                               kvm_complete_insn_gp(vcpu, err);
+                               ret = kvm_complete_insn_gp(vcpu, err);
                                if (lapic_in_kernel(vcpu))
-                                       return 1;
+                                       return ret;
                                if (cr8_prev <= cr8)
-                                       return 1;
+                                       return ret;
+                               /*
+                                * TODO: we might be squashing a
+                                * KVM_GUESTDBG_SINGLESTEP-triggered
+                                * KVM_EXIT_DEBUG here.
+                                */
                                vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
                                return 0;
                        }
@@ -5709,22 +5717,19 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                handle_clts(vcpu);
                trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
                vmx_fpu_activate(vcpu);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        case 1: /*mov from cr*/
                switch (cr) {
                case 3:
                        val = kvm_read_cr3(vcpu);
                        kvm_register_write(vcpu, reg, val);
                        trace_kvm_cr_read(cr, val);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                case 8:
                        val = kvm_get_cr8(vcpu);
                        kvm_register_write(vcpu, reg, val);
                        trace_kvm_cr_read(cr, val);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                break;
        case 3: /* lmsw */
@@ -5732,8 +5737,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
                kvm_lmsw(vcpu, val);
 
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        default:
                break;
        }
@@ -5804,8 +5808,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
                        return 1;
 
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
@@ -5858,8 +5861,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
        /* FIXME: handling of bits 32:63 of rax, rdx */
        vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
        vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_wrmsr(struct kvm_vcpu *vcpu)
@@ -5879,8 +5881,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
        }
 
        trace_kvm_msr_write(ecx, data);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
@@ -5924,8 +5925,7 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
        kvm_mmu_invlpg(vcpu, exit_qualification);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_rdpmc(struct kvm_vcpu *vcpu)
@@ -5933,15 +5933,12 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
        int err;
 
        err = kvm_rdpmc(vcpu);
-       kvm_complete_insn_gp(vcpu, err);
-
-       return 1;
+       return kvm_complete_insn_gp(vcpu, err);
 }
 
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
-       kvm_emulate_wbinvd(vcpu);
-       return 1;
+       return kvm_emulate_wbinvd(vcpu);
 }
 
 static int handle_xsetbv(struct kvm_vcpu *vcpu)
@@ -5950,20 +5947,20 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
        u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
 
        if (kvm_set_xcr(vcpu, index, new_bv) == 0)
-               skip_emulated_instruction(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
        return 1;
 }
 
 static int handle_xsaves(struct kvm_vcpu *vcpu)
 {
-       skip_emulated_instruction(vcpu);
+       kvm_skip_emulated_instruction(vcpu);
        WARN(1, "this should never happen\n");
        return 1;
 }
 
 static int handle_xrstors(struct kvm_vcpu *vcpu)
 {
-       skip_emulated_instruction(vcpu);
+       kvm_skip_emulated_instruction(vcpu);
        WARN(1, "this should never happen\n");
        return 1;
 }
@@ -5984,8 +5981,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
                if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
                    (offset == APIC_EOI)) {
                        kvm_lapic_set_eoi(vcpu);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
        }
        return emulate_instruction(vcpu, 0) == EMULATE_DONE;
@@ -6134,8 +6130,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
        if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
                trace_kvm_fast_mmio(gpa);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        ret = handle_mmio_page_fault(vcpu, gpa, true);
@@ -6508,15 +6503,12 @@ static int handle_pause(struct kvm_vcpu *vcpu)
                grow_ple_window(vcpu);
 
        kvm_vcpu_on_spin(vcpu);
-       skip_emulated_instruction(vcpu);
-
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_nop(struct kvm_vcpu *vcpu)
 {
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_mwait(struct kvm_vcpu *vcpu)
@@ -6823,8 +6815,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                 */
                if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failInvalid(vcpu);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
 
                page = nested_get_page(vcpu, vmptr);
@@ -6832,8 +6823,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                    *(u32 *)kmap(page) != VMCS12_REVISION) {
                        nested_vmx_failInvalid(vcpu);
                        kunmap(page);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                kunmap(page);
                vmx->nested.vmxon_ptr = vmptr;
@@ -6842,30 +6832,26 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMCLEAR_INVALID_ADDRESS);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
 
                if (vmptr == vmx->nested.vmxon_ptr) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMCLEAR_VMXON_POINTER);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                break;
        case EXIT_REASON_VMPTRLD:
                if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMPTRLD_INVALID_ADDRESS);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
 
                if (vmptr == vmx->nested.vmxon_ptr) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMCLEAR_VMXON_POINTER);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                break;
        default:
@@ -6921,8 +6907,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 
        if (vmx->nested.vmxon) {
                nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
@@ -6963,8 +6948,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
        vmx->nested.vmxon = true;
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 
 out_shadow_vmcs:
        kfree(vmx->nested.cached_vmcs12);
@@ -7084,8 +7068,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
                return 1;
        free_nested(to_vmx(vcpu));
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the VMCLEAR instruction */
@@ -7125,8 +7108,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        nested_free_vmcs02(vmx, vmptr);
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
@@ -7340,18 +7322,15 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (!nested_vmx_check_vmcs12(vcpu)) {
-               skip_emulated_instruction(vcpu);
-               return 1;
-       }
+       if (!nested_vmx_check_vmcs12(vcpu))
+               return kvm_skip_emulated_instruction(vcpu);
 
        /* Decode instruction info and find the field to read */
        field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
        /* Read the field, zero-extended to a u64 field_value */
        if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
                nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
        /*
         * Now copy part of this value to register or memory, as requested.
@@ -7371,8 +7350,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        }
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 
@@ -7394,10 +7372,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (!nested_vmx_check_vmcs12(vcpu)) {
-               skip_emulated_instruction(vcpu);
-               return 1;
-       }
+       if (!nested_vmx_check_vmcs12(vcpu))
+               return kvm_skip_emulated_instruction(vcpu);
 
        if (vmx_instruction_info & (1u << 10))
                field_value = kvm_register_readl(vcpu,
@@ -7418,19 +7394,16 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
        if (vmcs_field_readonly(field)) {
                nested_vmx_failValid(vcpu,
                        VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        if (vmcs12_write_any(vcpu, field, field_value) < 0) {
                nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the VMPTRLD instruction */
@@ -7451,8 +7424,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                page = nested_get_page(vcpu, vmptr);
                if (page == NULL) {
                        nested_vmx_failInvalid(vcpu);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                new_vmcs12 = kmap(page);
                if (new_vmcs12->revision_id != VMCS12_REVISION) {
@@ -7460,8 +7432,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                        nested_release_page_clean(page);
                        nested_vmx_failValid(vcpu,
                                VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
 
                nested_release_vmcs12(vmx);
@@ -7485,8 +7456,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
        }
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the VMPTRST instruction */
@@ -7511,8 +7481,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
                return 1;
        }
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the INVEPT instruction */
@@ -7550,8 +7519,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
        if (type >= 32 || !(types & (1 << type))) {
                nested_vmx_failValid(vcpu,
                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        /* According to the Intel VMX instruction reference, the memory
@@ -7582,8 +7550,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
                break;
        }
 
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_invvpid(struct kvm_vcpu *vcpu)
@@ -7614,8 +7581,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
        if (type >= 32 || !(types & (1 << type))) {
                nested_vmx_failValid(vcpu,
                        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        /* according to the intel vmx instruction reference, the memory
@@ -7637,23 +7603,20 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                if (!vpid) {
                        nested_vmx_failValid(vcpu,
                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                break;
        case VMX_VPID_EXTENT_ALL_CONTEXT:
                break;
        default:
                WARN_ON_ONCE(1);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
        nested_vmx_succeed(vcpu);
 
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_pml_full(struct kvm_vcpu *vcpu)
@@ -10194,6 +10157,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (!vmcs02)
                return -ENOMEM;
 
+       /*
+        * After this point, the trap flag no longer triggers a singlestep trap
+        * on the vm entry instructions. Don't call
+        * kvm_skip_emulated_instruction.
+        */
        skip_emulated_instruction(vcpu);
        enter_guest_mode(vcpu);
 
@@ -10238,8 +10206,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        return 1;
 
 out:
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ec59301f51926a06aa3f8f57c08e112362b1bda3..7b38c5e6f412f35502ef1c50671903740e8e0416 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -425,12 +425,14 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
        if (err)
                kvm_inject_gp(vcpu, 0);
        else
-               kvm_x86_ops->skip_emulated_instruction(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+
+       return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
 
@@ -4813,8 +4815,8 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
-       return kvm_emulate_wbinvd_noskip(vcpu);
+       kvm_emulate_wbinvd_noskip(vcpu);
+       return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
 
@@ -5430,6 +5432,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
        }
 }
 
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+       unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+       int r = EMULATE_DONE;
+
+       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+       return r == EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+
 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 {
        if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
@@ -6007,8 +6020,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
-       return kvm_vcpu_halt(vcpu);
+       int ret = kvm_skip_emulated_instruction(vcpu);
+       /*
+        * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+        * KVM_EXIT_DEBUG here.
+        */
+       return kvm_vcpu_halt(vcpu) && ret;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
@@ -6039,9 +6056,9 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
        unsigned long nr, a0, a1, a2, a3, ret;
-       int op_64_bit, r = 1;
+       int op_64_bit, r;
 
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       r = kvm_skip_emulated_instruction(vcpu);
 
        if (kvm_hv_hypercall_enabled(vcpu->kvm))
                return kvm_hv_hypercall(vcpu);
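
A note on the return-value convention (a summary, not part of the commit
text): x86 exit handlers return 1 to resume the guest and 0 to exit to
userspace, so a handler converted to the new helper typically ends like
this (handler name hypothetical):

    static int handle_example(struct kvm_vcpu *vcpu)
    {
            /* ... emulate the instruction's effects ... */

            /* Advances RIP; returns 0 iff KVM_EXIT_DEBUG was queued. */
            return kvm_skip_emulated_instruction(vcpu);
    }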