]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blobdiff - arch/x86/kvm/svm.c
KVM: x86: return all bits from get_interrupt_shadow
[mirror_ubuntu-bionic-kernel.git] / arch / x86 / kvm / svm.c
index ec8366c5cfeaa2004a637465d9fc65787cc97618..ddf742768ecf2f823b7e8462a8ccda2c45225f2f 100644 (file)
@@ -486,14 +486,14 @@ static int is_external_interrupt(u32 info)
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
 }
 
-static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;
 
        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
-               ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
-       return ret & mask;
+               ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
+       return ret;
 }
 
 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
@@ -1415,7 +1415,16 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
-       var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
+
+       /*
+        * AMD CPUs circa 2014 track the G bit for all segments except CS.
+        * However, the SVM spec states that the G bit is not observed by the
+        * CPU, and some VMware virtual CPUs drop the G bit for all segments.
+        * So let's synthesize a legal G bit for all segments; this helps
+        * when running KVM nested. It also helps cross-vendor migration, because
+        * Intel's vmentry has a check on the 'G' bit.
+        */
+       var->g = s->limit > 0xfffff;
 
        /*
         * AMD's VMCB does not have an explicit unusable field, so emulate it
@@ -1424,14 +1433,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
        var->unusable = !var->present || (var->type == 0);
 
        switch (seg) {
-       case VCPU_SREG_CS:
-               /*
-                * SVM always stores 0 for the 'G' bit in the CS selector in
-                * the VMCB on a VMEXIT. This hurts cross-vendor migration:
-                * Intel's VMENTRY has a check on the 'G' bit.
-                */
-               var->g = s->limit > 0xfffff;
-               break;
        case VCPU_SREG_TR:
                /*
                 * Work around a bug where the busy flag in the tr selector
@@ -1462,6 +1463,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                 */
                if (var->unusable)
                        var->db = 0;
+               var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
 }
@@ -2115,22 +2117,27 @@ static void nested_svm_unmap(struct page *page)
 
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 {
-       unsigned port;
-       u8 val, bit;
+       unsigned port, size, iopm_len;
+       u16 val, mask;
+       u8 start_bit;
        u64 gpa;
 
        if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;
 
        port = svm->vmcb->control.exit_info_1 >> 16;
+       size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
+               SVM_IOIO_SIZE_SHIFT;
        gpa  = svm->nested.vmcb_iopm + (port / 8);
-       bit  = port % 8;
-       val  = 0;
+       start_bit = port % 8;
+       iopm_len = (start_bit + size > 8) ? 2 : 1;
+       mask = (0xf >> (4 - size)) << start_bit;
+       val = 0;
 
-       if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
-               val &= (1 << bit);
+       if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
+               return NESTED_EXIT_DONE;
 
-       return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
+       return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
 }
 
 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
@@ -4204,7 +4211,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
                if (info->intercept == x86_intercept_cr_write)
                        icpt_info.exit_code += info->modrm_reg;
 
-               if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
+               if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
+                   info->intercept == x86_intercept_clts)
                        break;
 
                intercept = svm->nested.intercept;
@@ -4249,14 +4257,14 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
                u64 exit_info;
                u32 bytes;
 
-               exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
-
                if (info->intercept == x86_intercept_in ||
                    info->intercept == x86_intercept_ins) {
-                       exit_info |= SVM_IOIO_TYPE_MASK;
-                       bytes = info->src_bytes;
-               } else {
+                       exit_info = ((info->src_val & 0xffff) << 16) |
+                               SVM_IOIO_TYPE_MASK;
                        bytes = info->dst_bytes;
+               } else {
+                       exit_info = (info->dst_val & 0xffff) << 16;
+                       bytes = info->src_bytes;
                }
 
                if (info->intercept == x86_intercept_outs ||