[PATCH] KVM: MMU: Detect oom conditions and propagate error to userspace
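
kvm_mmu_page_fault() can now return a negative error code when it runs out
of memory for the shadow page tables (the OOM condition named in the
subject); pf_interception() and svm_vcpu_run() propagate it unchanged so
the error reaches userspace.  The other return values keep their meaning:
a positive value reenters the guest, zero exits to userspace.

Below is a minimal, self-contained sketch of that three-way return
convention.  handle_fault() is a hypothetical stand-in for
kvm_mmu_page_fault(), and -ENOMEM is assumed from the patch subject; the
diff itself only tests "r < 0":

	#include <errno.h>
	#include <stdio.h>

	/* hypothetical stand-in: < 0 error, 0 needs userspace, > 0 resolved */
	static int handle_fault(int oom)
	{
		return oom ? -ENOMEM : 1;
	}

	int main(void)
	{
		int r = handle_fault(1);

		if (r < 0)		/* propagate the error to the caller */
			fprintf(stderr, "run loop fails with %d\n", r);
		else if (r == 0)	/* handle the exit in userspace */
			printf("exit to userspace\n");
		else			/* fault handled, resume the guest */
			printf("resume guest\n");
		return r < 0 ? 1 : 0;
	}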
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index a33a89c68138a20308089aae0105c25c31da703f..af1e7b3f91719356321fce0bddea00c0603e0706 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -166,11 +166,6 @@ static inline void write_dr7(unsigned long val)
        asm volatile ("mov %0, %%dr7" :: "r" (val));
 }
 
-static inline int svm_is_long_mode(struct kvm_vcpu *vcpu)
-{
-       return vcpu->svm->vmcb->save.efer & KVM_EFER_LMA;
-}
-
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
 {
        vcpu->svm->asid_generation--;
@@ -240,13 +235,15 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 
        vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
        vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+
+       vcpu->interrupt_window_open = 1;
 }
 
 static int has_svm(void)
 {
        uint32_t eax, ebx, ecx, edx;
 
-       if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                printk(KERN_INFO "has_svm: not amd\n");
                return 0;
        }
@@ -287,7 +284,7 @@ static void svm_hardware_enable(void *garbage)
 
        struct svm_cpu_data *svm_data;
        uint64_t efer;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        struct desc_ptr gdt_descr;
 #else
        struct Xgt_desc_struct gdt_descr;
@@ -377,6 +374,7 @@ static __init int svm_hardware_setup(void)
        void *msrpm_va;
        int r;
 
+       kvm_emulator_want_group7_invlpg();
 
        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
 
@@ -397,15 +395,15 @@ static __init int svm_hardware_setup(void)
        memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
        msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
        set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
        set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
-       set_msr_interception(msrpm_va, MSR_STAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
 #endif
+       set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);
@@ -499,7 +497,6 @@ static void init_vmcb(struct vmcb *vmcb)
                /*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
                                (1ULL << INTERCEPT_CPUID) |
                                (1ULL << INTERCEPT_HLT) |
-                               (1ULL << INTERCEPT_INVLPG) |
                                (1ULL << INTERCEPT_INVLPGA) |
                                (1ULL << INTERCEPT_IOIO_PROT) |
                                (1ULL << INTERCEPT_MSR_PROT) |
@@ -574,6 +571,8 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
        memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
        init_vmcb(vcpu->svm->vmcb);
 
+       fx_init(vcpu);
+
        return 0;
 
 out2:
@@ -702,9 +701,13 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
        vcpu->svm->vmcb->save.gdtr.base = dt->base ;
 }
 
+static void svm_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & KVM_EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
                        vcpu->shadow_efer |= KVM_EFER_LMA;
@@ -849,6 +852,7 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        u64 fault_address;
        u32 error_code;
        enum emulation_result er;
+       int r;
 
        if (is_external_interrupt(exit_int_info))
                push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
@@ -857,7 +861,12 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        fault_address  = vcpu->svm->vmcb->control.exit_info_2;
        error_code = vcpu->svm->vmcb->control.exit_info_1;
-       if (!vcpu->mmu.page_fault(vcpu, fault_address, error_code)) {
+       r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
+       if (r < 0) {
+               spin_unlock(&vcpu->kvm->lock);
+               return r;
+       }
+       if (!r) {
                spin_unlock(&vcpu->kvm->lock);
                return 1;
        }
@@ -1033,10 +1042,11 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
        skip_emulated_instruction(vcpu);
-       if (vcpu->irq_summary && (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF))
+       if (vcpu->irq_summary)
                return 1;
 
        kvm_run->exit_reason = KVM_EXIT_HLT;
+       ++kvm_stat.halt_exits;
        return 0;
 }
 
@@ -1070,20 +1080,6 @@ static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_ru
 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 {
        switch (ecx) {
-       case MSR_IA32_MC0_CTL:
-       case MSR_IA32_MCG_STATUS:
-       case MSR_IA32_MCG_CAP:
-       case MSR_IA32_MC0_MISC:
-       case MSR_IA32_MC0_MISC+4:
-       case MSR_IA32_MC0_MISC+8:
-       case MSR_IA32_MC0_MISC+12:
-       case MSR_IA32_MC0_MISC+16:
-       case MSR_IA32_UCODE_REV:
-               /* MTRR registers */
-       case 0xfe:
-       case 0x200 ... 0x2ff:
-               *data = 0;
-               break;
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;
 
@@ -1091,16 +1087,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
                *data = vcpu->svm->vmcb->control.tsc_offset + tsc;
                break;
        }
-       case MSR_EFER:
-               *data = vcpu->shadow_efer;
-               break;
-       case MSR_IA32_APICBASE:
-               *data = vcpu->apic_base;
-               break;
-#ifdef __x86_64__
-       case MSR_STAR:
+       case MSR_K6_STAR:
                *data = vcpu->svm->vmcb->save.star;
                break;
+#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                *data = vcpu->svm->vmcb->save.lstar;
                break;
@@ -1124,8 +1114,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
                *data = vcpu->svm->vmcb->save.sysenter_esp;
                break;
        default:
-               printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", ecx);
-               return 1;
+               return kvm_get_msr_common(vcpu, ecx, data);
        }
        return 0;
 }
@@ -1149,15 +1138,6 @@ static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 {
        switch (ecx) {
-#ifdef __x86_64__
-       case MSR_EFER:
-               set_efer(vcpu, data);
-               break;
-#endif
-       case MSR_IA32_MC0_STATUS:
-               printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n"
-                           , __FUNCTION__, data);
-               break;
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;
 
@@ -1165,17 +1145,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
                vcpu->svm->vmcb->control.tsc_offset = data - tsc;
                break;
        }
-       case MSR_IA32_UCODE_REV:
-       case MSR_IA32_UCODE_WRITE:
-       case 0x200 ... 0x2ff: /* MTRRs */
-               break;
-       case MSR_IA32_APICBASE:
-               vcpu->apic_base = data;
-               break;
-#ifdef __x86_64___
-       case MSR_STAR:
+       case MSR_K6_STAR:
                vcpu->svm->vmcb->save.star = data;
                break;
+#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                vcpu->svm->vmcb->save.lstar = data;
                break;
@@ -1199,8 +1172,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
                vcpu->svm->vmcb->save.sysenter_esp = data;
                break;
        default:
-               printk(KERN_ERR "kvm: unhandled wrmsr: %x\n", ecx);
-               return 1;
+               return kvm_set_msr_common(vcpu, ecx, data);
        }
        return 0;
 }
@@ -1226,6 +1198,24 @@ static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return rdmsr_interception(vcpu, kvm_run);
 }
 
+static int interrupt_window_interception(struct kvm_vcpu *vcpu,
+                                  struct kvm_run *kvm_run)
+{
+       /*
+        * If userspace is waiting to inject interrupts, exit as soon
+        * as possible.
+        */
+       if (kvm_run->request_interrupt_window &&
+           !vcpu->irq_summary &&
+           (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)) {
+               ++kvm_stat.irq_window_exits;
+               kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               return 0;
+       }
+
+       return 1;
+}
+
 static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
                                      struct kvm_run *kvm_run) = {
        [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
@@ -1250,6 +1240,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
        [SVM_EXIT_NMI]                          = nop_on_interception,
        [SVM_EXIT_SMI]                          = nop_on_interception,
        [SVM_EXIT_INIT]                         = nop_on_interception,
+       [SVM_EXIT_VINTR]                        = interrupt_window_interception,
        /* [SVM_EXIT_CR0_SEL_WRITE]             = emulate_on_interception, */
        [SVM_EXIT_CPUID]                        = cpuid_interception,
        [SVM_EXIT_HLT]                          = halt_interception,
@@ -1318,15 +1309,11 @@ static void pre_svm_run(struct kvm_vcpu *vcpu)
 }
 
 
-static inline void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
+static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 {
        struct vmcb_control_area *control;
 
-       if (!vcpu->irq_summary)
-               return;
-
        control = &vcpu->svm->vmcb->control;
-
        control->int_vector = pop_irq(vcpu);
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
@@ -1341,57 +1328,75 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
                control->int_ctl &= ~V_IRQ_MASK;
                push_irq(vcpu, control->int_vector);
        }
+
+       vcpu->interrupt_window_open =
+               !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
+}
+
+static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+                                      struct kvm_run *kvm_run)
+{
+       struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+
+       vcpu->interrupt_window_open =
+               (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+                (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+
+       if (vcpu->interrupt_window_open && vcpu->irq_summary)
+               /*
+                * Interrupts enabled and not blocked by sti/mov ss: inject now.
+                */
+               kvm_do_inject_irq(vcpu);
+
+       /*
+        * Interrupts blocked; intercept VINTR to learn when they unblock.
+        */
+       if (!vcpu->interrupt_window_open &&
+           (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
+               control->intercept |= 1ULL << INTERCEPT_VINTR;
+       } else
+               control->intercept &= ~(1ULL << INTERCEPT_VINTR);
+}
+
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+                             struct kvm_run *kvm_run)
+{
+       kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
+                                                 vcpu->irq_summary == 0);
+       kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
+       kvm_run->cr8 = vcpu->cr8;
+       kvm_run->apic_base = vcpu->apic_base;
+}
+
+/*
+ * Check whether userspace requested an interrupt window and whether
+ * the interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+                                         struct kvm_run *kvm_run)
+{
+       return (!vcpu->irq_summary &&
+               kvm_run->request_interrupt_window &&
+               vcpu->interrupt_window_open &&
+               (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
 }
 
 static void save_db_regs(unsigned long *db_regs)
 {
-#ifdef __x86_64__
-       asm ("mov %%dr0, %%rax \n\t"
-            "mov %%rax, %[dr0] \n\t"
-            "mov %%dr1, %%rax \n\t"
-            "mov %%rax, %[dr1] \n\t"
-            "mov %%dr2, %%rax \n\t"
-            "mov %%rax, %[dr2] \n\t"
-            "mov %%dr3, %%rax \n\t"
-            "mov %%rax, %[dr3] \n\t"
-            : [dr0] "=m"(db_regs[0]),
-              [dr1] "=m"(db_regs[1]),
-              [dr2] "=m"(db_regs[2]),
-              [dr3] "=m"(db_regs[3])
-            : : "rax");
-#else
-       asm ("mov %%dr0, %%eax \n\t"
-            "mov %%eax, %[dr0] \n\t"
-            "mov %%dr1, %%eax \n\t"
-            "mov %%eax, %[dr1] \n\t"
-            "mov %%dr2, %%eax \n\t"
-            "mov %%eax, %[dr2] \n\t"
-            "mov %%dr3, %%eax \n\t"
-            "mov %%eax, %[dr3] \n\t"
-            : [dr0] "=m"(db_regs[0]),
-              [dr1] "=m"(db_regs[1]),
-              [dr2] "=m"(db_regs[2]),
-              [dr3] "=m"(db_regs[3])
-            : : "eax");
-#endif
+       asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
+       asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
+       asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
+       asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
 }
 
 static void load_db_regs(unsigned long *db_regs)
 {
-       asm volatile ("mov %[dr0], %%dr0 \n\t"
-            "mov %[dr1], %%dr1 \n\t"
-            "mov %[dr2], %%dr2 \n\t"
-            "mov %[dr3], %%dr3 \n\t"
-            :
-            : [dr0] "r"(db_regs[0]),
-              [dr1] "r"(db_regs[1]),
-              [dr2] "r"(db_regs[2]),
-              [dr3] "r"(db_regs[3])
-#ifdef __x86_64__
-            : "rax");
-#else
-            : "eax");
-#endif
+       asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
+       asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
+       asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
+       asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
 }
 
 static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1399,9 +1404,10 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        u16 fs_selector;
        u16 gs_selector;
        u16 ldt_selector;
+       int r;
 
 again:
-       kvm_try_inject_irq(vcpu);
+       do_interrupt_requests(vcpu, kvm_run);
 
        clgi();
 
@@ -1421,8 +1427,12 @@ again:
                save_db_regs(vcpu->svm->host_db_regs);
                load_db_regs(vcpu->svm->db_regs);
        }
+
+       fx_save(vcpu->host_fx_image);
+       fx_restore(vcpu->guest_fx_image);
+
        asm volatile (
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
                "push %%rbx; push %%rcx; push %%rdx;"
                "push %%rsi; push %%rdi; push %%rbp;"
                "push %%r8;  push %%r9;  push %%r10; push %%r11;"
@@ -1432,7 +1442,7 @@ again:
                "push %%esi; push %%edi; push %%ebp;"
 #endif
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
                "mov %c[rbx](%[vcpu]), %%rbx \n\t"
                "mov %c[rcx](%[vcpu]), %%rcx \n\t"
                "mov %c[rdx](%[vcpu]), %%rdx \n\t"
@@ -1456,7 +1466,7 @@ again:
                "mov %c[rbp](%[vcpu]), %%ebp \n\t"
 #endif
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
                /* Enter guest mode */
                "push %%rax \n\t"
                "mov %c[svm](%[vcpu]), %%rax \n\t"
@@ -1477,7 +1487,7 @@ again:
 #endif
 
                /* Save guest registers, load host registers */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
                "mov %%rbx, %c[rbx](%[vcpu]) \n\t"
                "mov %%rcx, %c[rcx](%[vcpu]) \n\t"
                "mov %%rdx, %c[rdx](%[vcpu]) \n\t"
@@ -1518,7 +1528,7 @@ again:
                  [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
                  [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
                  [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
                  ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
                  [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
                  [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
@@ -1530,6 +1540,9 @@ again:
 #endif
                : "cc", "memory" );
 
+       fx_save(vcpu->guest_fx_image);
+       fx_restore(vcpu->host_fx_image);
+
        if ((vcpu->svm->vmcb->save.dr7 & 0xff))
                load_db_regs(vcpu->svm->host_db_regs);
 
@@ -1555,18 +1568,28 @@ again:
        if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
                kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
+               post_kvm_run_save(vcpu, kvm_run);
                return 0;
        }
 
-       if (handle_exit(vcpu, kvm_run)) {
+       r = handle_exit(vcpu, kvm_run);
+       if (r > 0) {
                if (signal_pending(current)) {
                        ++kvm_stat.signal_exits;
+                       post_kvm_run_save(vcpu, kvm_run);
+                       return -EINTR;
+               }
+
+               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                       ++kvm_stat.request_irq_exits;
+                       post_kvm_run_save(vcpu, kvm_run);
                        return -EINTR;
                }
                kvm_resched(vcpu);
                goto again;
        }
-       return 0;
+       post_kvm_run_save(vcpu, kvm_run);
+       return r;
 }
 
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
@@ -1632,8 +1655,8 @@ static struct kvm_arch_ops svm_arch_ops = {
        .get_segment_base = svm_get_segment_base,
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
-       .is_long_mode = svm_is_long_mode,
        .get_cs_db_l_bits = svm_get_cs_db_l_bits,
+       .decache_cr0_cr4_guest_bits = svm_decache_cr0_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr0_no_modeswitch = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
@@ -1663,9 +1686,7 @@ static struct kvm_arch_ops svm_arch_ops = {
 
 static int __init svm_init(void)
 {
-       kvm_emulator_want_group7_invlpg();
-       kvm_init_arch(&svm_arch_ops, THIS_MODULE);
-       return 0;
+       return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
 }
 
 static void __exit svm_exit(void)