KVM: x86: return all bits from get_interrupt_shadow
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7f4f9c2badaefdf880b999fed48274748a808fd7..ddf742768ecf2f823b7e8462a8ccda2c45225f2f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -486,14 +486,14 @@ static int is_external_interrupt(u32 info)
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
 }
 
-static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;
 
        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
-               ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
-       return ret & mask;
+               ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
+       return ret;
 }
 
 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
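
The hunk above drops the mask parameter: svm_get_interrupt_shadow() now reports every shadow bit, and each caller applies whatever mask it needs. A minimal userspace sketch of the new calling convention, assuming nothing beyond the diff: the two constants carry their real KVM values, but get_interrupt_shadow() and main() here are stand-ins for illustration, not kernel code.

#include <stdio.h>

#define KVM_X86_SHADOW_INT_MOV_SS 0x01
#define KVM_X86_SHADOW_INT_STI    0x02

/* stand-in for the new svm_get_interrupt_shadow(): no mask argument */
static unsigned int get_interrupt_shadow(int in_shadow)
{
	return in_shadow ? (KVM_X86_SHADOW_INT_STI |
			    KVM_X86_SHADOW_INT_MOV_SS) : 0;
}

int main(void)
{
	unsigned int shadow = get_interrupt_shadow(1);

	/* a caller that only cares about STI applies its own mask */
	printf("STI shadow active: %u\n", !!(shadow & KVM_X86_SHADOW_INT_STI));
	return 0;
}
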
@@ -1338,21 +1338,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
-static void svm_update_cpl(struct kvm_vcpu *vcpu)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-       int cpl;
-
-       if (!is_protmode(vcpu))
-               cpl = 0;
-       else if (svm->vmcb->save.rflags & X86_EFLAGS_VM)
-               cpl = 3;
-       else
-               cpl = svm->vmcb->save.cs.selector & 0x3;
-
-       svm->vmcb->save.cpl = cpl;
-}
-
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
        return to_svm(vcpu)->vmcb->save.rflags;
@@ -1360,11 +1345,12 @@ static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 
 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-       unsigned long old_rflags = to_svm(vcpu)->vmcb->save.rflags;
-
+       /*
+        * Any change of EFLAGS.VM is accompanied by a reload of SS
+        * (caused by either a task switch or an inter-privilege IRET),
+        * so we do not need to update the CPL here.
+        */
        to_svm(vcpu)->vmcb->save.rflags = rflags;
-       if ((old_rflags ^ rflags) & X86_EFLAGS_VM)
-               svm_update_cpl(vcpu);
 }
 
 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
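
The two hunks above remove the last user of svm_update_cpl(): on SVM the CPL field kept in the VMCB save area is authoritative (it tracks SS.DPL), and any change of EFLAGS.VM reloads SS anyway, so recomputing CPL from CS.RPL on every rflags write is unnecessary. A self-contained sketch contrasting the deleted derivation with the new source of truth; struct fake_state and the sample values are invented for the demo.

#include <stdio.h>

#define X86_EFLAGS_VM (1u << 17)

struct fake_state {
	int protmode;			/* simplified CR0.PE */
	unsigned long rflags;
	unsigned short cs_selector;
	unsigned char ss_dpl;		/* what vmcb->save.cpl now mirrors */
};

/* the derivation the deleted svm_update_cpl() performed */
static int old_cpl(const struct fake_state *s)
{
	if (!s->protmode)
		return 0;
	if (s->rflags & X86_EFLAGS_VM)
		return 3;
	return s->cs_selector & 0x3;	/* CS.RPL */
}

int main(void)
{
	struct fake_state s = { 1, 0, 0x33, 3 };	/* 64-bit user mode */

	printf("old=%d, new=%d\n", old_cpl(&s), s.ss_dpl);	/* both 3 */
	return 0;
}
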
@@ -1429,7 +1415,16 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
-       var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
+
+       /*
+        * AMD CPUs circa 2014 track the G bit for all segments except CS.
+        * However, the SVM spec states that the G bit is not observed by the
+        * CPU, and some VMware virtual CPUs drop the G bit for all segments.
+        * So let's synthesize a legal G bit for all segments; this helps
+        * when running KVM nested. It also helps cross-vendor migration,
+        * because Intel's vmentry has a check on the 'G' bit.
+        */
+       var->g = s->limit > 0xfffff;
 
        /*
         * AMD's VMCB does not have an explicit unusable field, so emulate it
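
Synthesizing G from the limit is always legal because the descriptor limit field is 20 bits wide: a cached limit above 0xfffff is only encodable with 4KiB granularity, and anything smaller is expressible with G=0. A tiny demo of the reconstruction; synth_g() is an invented name for the one-liner added above.

#include <stdio.h>

/* limits above 20 bits are only encodable with 4KiB granularity */
static int synth_g(unsigned int limit)
{
	return limit > 0xfffff;
}

int main(void)
{
	printf("limit 0x0000ffff -> G=%d\n", synth_g(0x0000ffffu));	/* 0 */
	printf("limit 0xffffffff -> G=%d\n", synth_g(0xffffffffu));	/* 1 */
	return 0;
}
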
@@ -1438,14 +1433,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
        var->unusable = !var->present || (var->type == 0);
 
        switch (seg) {
-       case VCPU_SREG_CS:
-               /*
-                * SVM always stores 0 for the 'G' bit in the CS selector in
-                * the VMCB on a VMEXIT. This hurts cross-vendor migration:
-                * Intel's VMENTRY has a check on the 'G' bit.
-                */
-               var->g = s->limit > 0xfffff;
-               break;
        case VCPU_SREG_TR:
                /*
                 * Work around a bug where the busy flag in the tr selector
@@ -1476,6 +1463,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                 */
                if (var->unusable)
                        var->db = 0;
+               var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
 }
@@ -1631,8 +1619,15 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
                s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
                s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
        }
-       if (seg == VCPU_SREG_CS)
-               svm_update_cpl(vcpu);
+
+       /*
+        * This is always accurate, except if SYSRET returned to a segment
+        * with SS.DPL != 3.  Intel does not have this quirk, and always
+        * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
+        * would entail passing the CPL to userspace and back.
+        */
+       if (seg == VCPU_SREG_SS)
+               svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
 
        mark_dirty(svm->vmcb, VMCB_SEG);
 }
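
The SS.DPL read above relies on the packed attribute format the VMCB uses for segment registers, where the DPL field sits at bit 5 (SVM_SELECTOR_DPL_SHIFT in arch/x86/include/asm/svm.h). A small demo of the extraction; the sample attribute word is invented.

#include <stdio.h>

#define SVM_SELECTOR_DPL_SHIFT 5	/* value from asm/svm.h */

int main(void)
{
	/* invented sample: present, expand-up data segment with DPL 3 */
	unsigned short attrib = 0x93 | (3 << SVM_SELECTOR_DPL_SHIFT);

	printf("cpl = %d\n", (attrib >> SVM_SELECTOR_DPL_SHIFT) & 3);	/* 3 */
	return 0;
}
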
@@ -2122,22 +2117,27 @@ static void nested_svm_unmap(struct page *page)
 
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 {
-       unsigned port;
-       u8 val, bit;
+       unsigned port, size, iopm_len;
+       u16 val, mask;
+       u8 start_bit;
        u64 gpa;
 
        if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;
 
        port = svm->vmcb->control.exit_info_1 >> 16;
+       size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
+               SVM_IOIO_SIZE_SHIFT;
        gpa  = svm->nested.vmcb_iopm + (port / 8);
-       bit  = port % 8;
-       val  = 0;
+       start_bit = port % 8;
+       iopm_len = (start_bit + size > 8) ? 2 : 1;
+       mask = (0xf >> (4 - size)) << start_bit;
+       val = 0;
 
-       if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
-               val &= (1 << bit);
+       if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
+               return NESTED_EXIT_DONE;
 
-       return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
+       return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
 }
 
 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
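
The rewritten IOPM check covers the case the old code missed: the nested I/O permission map holds one bit per port, so a multi-byte access whose permission bits straddle a byte boundary needs two bytes of the map, not one. The old code also only masked val when kvm_read_guest() *failed*; the fix treats a failed read as an intercept (NESTED_EXIT_DONE) and applies the mask unconditionally. A runnable demo of the offset/length/mask arithmetic for a straddling access; port 0x3f with a 2-byte access is an arbitrary example.

#include <stdio.h>

int main(void)
{
	unsigned port = 0x3f, size = 2;		/* 2-byte access at port 0x3f */
	unsigned start_bit = port % 8;		/* bit 7 of bitmap byte 7 */
	unsigned iopm_len = (start_bit + size > 8) ? 2 : 1;
	unsigned short mask = (0xf >> (4 - size)) << start_bit;

	/* prints: offset=7 len=2 mask=0x180 -- bits 7 and 8 span two bytes */
	printf("offset=%u len=%u mask=0x%x\n", port / 8, iopm_len, mask);
	return 0;
}
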
@@ -2770,12 +2770,6 @@ static int xsetbv_interception(struct vcpu_svm *svm)
        return 1;
 }
 
-static int invalid_op_interception(struct vcpu_svm *svm)
-{
-       kvm_queue_exception(&svm->vcpu, UD_VECTOR);
-       return 1;
-}
-
 static int task_switch_interception(struct vcpu_svm *svm)
 {
        u16 tss_selector;
@@ -3287,6 +3281,24 @@ static int pause_interception(struct vcpu_svm *svm)
        return 1;
 }
 
+static int nop_interception(struct vcpu_svm *svm)
+{
+       skip_emulated_instruction(&(svm->vcpu));
+       return 1;
+}
+
+static int monitor_interception(struct vcpu_svm *svm)
+{
+       printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
+       return nop_interception(svm);
+}
+
+static int mwait_interception(struct vcpu_svm *svm)
+{
+       printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
+       return nop_interception(svm);
+}
+
 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_READ_CR0]                     = cr_interception,
        [SVM_EXIT_READ_CR3]                     = cr_interception,
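
Emulating MONITOR/MWAIT as NOPs is architecturally tolerable, since MWAIT is permitted to resume for reasons other than a write to the monitored range, and printk_once() keeps a misbehaving guest from flooding the host log. A userspace stand-in for the warn-once-then-nop pattern; the function name is invented.

#include <stdio.h>

/* stand-in for printk_once(): warn on first use, then stay quiet */
static void monitor_as_nop(void)
{
	static int warned;

	if (!warned) {
		warned = 1;
		fprintf(stderr, "MONITOR emulated as NOP!\n");
	}
	/* nothing else to do: the caller skips the instruction and resumes */
}

int main(void)
{
	monitor_as_nop();
	monitor_as_nop();	/* silent the second time */
	return 0;
}
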
@@ -3344,8 +3356,8 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_CLGI]                         = clgi_interception,
        [SVM_EXIT_SKINIT]                       = skinit_interception,
        [SVM_EXIT_WBINVD]                       = emulate_on_interception,
-       [SVM_EXIT_MONITOR]                      = invalid_op_interception,
-       [SVM_EXIT_MWAIT]                        = invalid_op_interception,
+       [SVM_EXIT_MONITOR]                      = monitor_interception,
+       [SVM_EXIT_MWAIT]                        = mwait_interception,
        [SVM_EXIT_XSETBV]                       = xsetbv_interception,
        [SVM_EXIT_NPF]                          = pf_interception,
 };
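
Because svm_exit_handlers[] is a function-pointer table indexed by exit code, rerouting MONITOR/MWAIT from #UD injection to NOP emulation is a pure table edit. A compact illustration of the same dispatch pattern; the enum values and handler names here are invented.

#include <stdio.h>

enum { EXIT_MONITOR, EXIT_MWAIT, NR_EXITS };	/* invented exit codes */

static int monitor_nop(void) { puts("monitor -> nop"); return 1; }
static int mwait_nop(void)   { puts("mwait -> nop");   return 1; }

/* designated initializers, as in svm_exit_handlers[] */
static int (*const handlers[NR_EXITS])(void) = {
	[EXIT_MONITOR]	= monitor_nop,
	[EXIT_MWAIT]	= mwait_nop,
};

int main(void)
{
	return handlers[EXIT_MWAIT]() == 1 ? 0 : 1;	/* dispatch by code */
}
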
@@ -4199,7 +4211,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
                if (info->intercept == x86_intercept_cr_write)
                        icpt_info.exit_code += info->modrm_reg;
 
-               if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
+               if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
+                   info->intercept == x86_intercept_clts)
                        break;
 
                intercept = svm->nested.intercept;
@@ -4244,14 +4257,14 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
                u64 exit_info;
                u32 bytes;
 
-               exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
-
                if (info->intercept == x86_intercept_in ||
                    info->intercept == x86_intercept_ins) {
-                       exit_info |= SVM_IOIO_TYPE_MASK;
-                       bytes = info->src_bytes;
-               } else {
+                       exit_info = ((info->src_val & 0xffff) << 16) |
+                               SVM_IOIO_TYPE_MASK;
                        bytes = info->dst_bytes;
+               } else {
+                       exit_info = (info->dst_val & 0xffff) << 16;
+                       bytes = info->src_bytes;
                }
 
                if (info->intercept == x86_intercept_outs ||
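
The IN/OUT fix above hinges on the EXITINFO1 layout for IOIO intercepts: the port number lives in bits 31:16 and bit 0 (SVM_IOIO_TYPE_MASK) flags a read. For IN the port is the source operand and the data size is the destination's, which is why the code now uses info->src_val and info->dst_bytes rather than reading RDX, which is wrong for the immediate forms of IN/OUT. A short demo of the encoding; port 0x60 is an arbitrary sample.

#include <stdio.h>

#define SVM_IOIO_TYPE_MASK 1	/* bit 0 of EXITINFO1: 1 = IN (read) */

int main(void)
{
	unsigned long long port = 0x60;	/* sample: IN from port 0x60 */
	unsigned long long exit_info =
		((port & 0xffff) << 16) | SVM_IOIO_TYPE_MASK;

	/* prints: exit_info=0x600001 port=0x60 */
	printf("exit_info=0x%llx port=0x%llx\n",
	       exit_info, (exit_info >> 16) & 0xffff);
	return 0;
}
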