cpu: Move halted and interrupt_request fields to CPUState
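In outline: halted and interrupt_request used to live in each
target's CPUArchState (CPUX86State here) and are moved into the
shared CPUState, so common code can reach them without knowing the
target. A minimal sketch of the resulting layout, with the field
sets heavily trimmed (the real definitions are in include/qom/cpu.h
and target-i386/cpu.h):

    /* sketch.c -- illustrative only, not the actual QEMU structs */
    #include <stdint.h>
    #include <stdio.h>

    #define CPU_INTERRUPT_HARD 0x0002    /* value as in QEMU's cpu-all.h */

    /* Shared, target-independent per-VCPU state. */
    typedef struct CPUState {
        uint32_t halted;            /* nonzero while the VCPU is halted */
        uint32_t interrupt_request; /* bitmask of CPU_INTERRUPT_* flags */
    } CPUState;

    /* Target-specific part; no longer carries its own copies. */
    typedef struct CPUX86State {
        uint64_t eflags;            /* ...plus the rest of the x86 state */
    } CPUX86State;

    int main(void)
    {
        CPUState cs = { .halted = 1,
                        .interrupt_request = CPU_INTERRUPT_HARD };

        /* Code that used to test env->interrupt_request now tests
         * cs->interrupt_request, as the hunks below show. */
        if (cs.interrupt_request & CPU_INTERRUPT_HARD) {
            cs.halted = 0;
        }
        printf("halted=%u\n", cs.halted);
        return 0;
    }

The hunks below follow that move mechanically: every env->halted,
env->interrupt_request and env->exit_request access becomes an access
through the CPUState (cs or cpu), obtained where necessary via
CPU(cpu) or X86_CPU(cs).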
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index ff5c9cd1cd98581a05a6dd44d20a3d2e32d63f63..df30fa6ed62d015960fde9d117dc28f7536d664b 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
 #include <linux/kvm_para.h>
 
 #include "qemu-common.h"
-#include "sysemu.h"
-#include "kvm.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
 #include "kvm_i386.h"
 #include "cpu.h"
-#include "gdbstub.h"
-#include "host-utils.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#include "qemu/config-file.h"
 #include "hw/pc.h"
 #include "hw/apic.h"
-#include "ioport.h"
+#include "exec/ioport.h"
 #include "hyperv.h"
-#include "hw/pci.h"
+#include "hw/pci/pci.h"
 
 //#define DEBUG_KVM
 
@@ -62,6 +63,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 
 static bool has_msr_star;
 static bool has_msr_hsave_pa;
+static bool has_msr_tsc_adjust;
 static bool has_msr_tsc_deadline;
 static bool has_msr_async_pf_en;
 static bool has_msr_pv_eoi_en;
@@ -409,11 +411,19 @@ static void cpu_update_state(void *opaque, int running, RunState state)
     }
 }
 
+unsigned long kvm_arch_vcpu_id(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    return cpu->env.cpuid_apic_id;
+}
+
+#define KVM_MAX_CPUID_ENTRIES  100
+
 int kvm_arch_init_vcpu(CPUState *cs)
 {
     struct {
         struct kvm_cpuid2 cpuid;
-        struct kvm_cpuid_entry2 entries[100];
+        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
     } QEMU_PACKED cpuid_data;
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
@@ -500,6 +510,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
     cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
 
     for (i = 0; i <= limit; i++) {
+        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
+            abort();
+        }
         c = &cpuid_data.entries[cpuid_i++];
 
         switch (i) {
@@ -514,6 +528,11 @@ int kvm_arch_init_vcpu(CPUState *cs)
             times = c->eax & 0xff;
 
             for (j = 1; j < times; ++j) {
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    fprintf(stderr, "cpuid_data is full, no space for "
+                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
+                    abort();
+                }
                 c = &cpuid_data.entries[cpuid_i++];
                 c->function = i;
                 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
@@ -542,6 +561,11 @@ int kvm_arch_init_vcpu(CPUState *cs)
                 if (i == 0xd && c->eax == 0) {
                     continue;
                 }
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    fprintf(stderr, "cpuid_data is full, no space for "
+                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
+                    abort();
+                }
                 c = &cpuid_data.entries[cpuid_i++];
             }
             break;
@@ -555,6 +579,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
     cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
 
     for (i = 0x80000000; i <= limit; i++) {
+        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
+            abort();
+        }
         c = &cpuid_data.entries[cpuid_i++];
 
         c->function = i;
@@ -567,6 +595,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
         cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
 
         for (i = 0xC0000000; i <= limit; i++) {
+            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
+                abort();
+            }
             c = &cpuid_data.entries[cpuid_i++];
 
             c->function = i;
@@ -682,6 +714,10 @@ static int kvm_get_supported_msrs(KVMState *s)
                     has_msr_hsave_pa = true;
                     continue;
                 }
+                if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
+                    has_msr_tsc_adjust = true;
+                    continue;
+                }
                 if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                     has_msr_tsc_deadline = true;
                     continue;
@@ -1025,6 +1061,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     if (has_msr_hsave_pa) {
         kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
     }
+    if (has_msr_tsc_adjust) {
+        kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
+    }
     if (has_msr_tsc_deadline) {
         kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
     }
@@ -1290,6 +1329,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_hsave_pa) {
         msrs[n++].index = MSR_VM_HSAVE_PA;
     }
+    if (has_msr_tsc_adjust) {
+        msrs[n++].index = MSR_TSC_ADJUST;
+    }
     if (has_msr_tsc_deadline) {
         msrs[n++].index = MSR_IA32_TSCDEADLINE;
     }
@@ -1367,6 +1409,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_TSC:
             env->tsc = msrs[i].data;
             break;
+        case MSR_TSC_ADJUST:
+            env->tsc_adjust = msrs[i].data;
+            break;
         case MSR_IA32_TSCDEADLINE:
             env->tsc_deadline = msrs[i].data;
             break;
@@ -1415,17 +1460,18 @@ static int kvm_put_mp_state(X86CPU *cpu)
 
 static int kvm_get_mp_state(X86CPU *cpu)
 {
+    CPUState *cs = CPU(cpu);
     CPUX86State *env = &cpu->env;
     struct kvm_mp_state mp_state;
     int ret;
 
-    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
     if (ret < 0) {
         return ret;
     }
     env->mp_state = mp_state.mp_state;
     if (kvm_irqchip_in_kernel()) {
-        env->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
+        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
     }
     return 0;
 }
@@ -1717,8 +1763,8 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     int ret;
 
     /* Inject NMI */
-    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
-        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
+    if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+        cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
         DPRINTF("injected NMI\n");
         ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
         if (ret < 0) {
@@ -1730,18 +1776,18 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     if (!kvm_irqchip_in_kernel()) {
         /* Force the VCPU out of its inner loop to process any INIT requests
          * or pending TPR access reports. */
-        if (env->interrupt_request &
+        if (cpu->interrupt_request &
             (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
-            env->exit_request = 1;
+            cpu->exit_request = 1;
         }
 
         /* Try to inject an interrupt if the guest can accept it */
         if (run->ready_for_interrupt_injection &&
-            (env->interrupt_request & CPU_INTERRUPT_HARD) &&
+            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
             (env->eflags & IF_MASK)) {
             int irq;
 
-            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
             irq = cpu_get_pic_interrupt(env);
             if (irq >= 0) {
                 struct kvm_interrupt intr;
@@ -1761,7 +1807,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
          * interrupt, request an interrupt window exit.  This will
          * cause a return to userspace as soon as the guest is ready to
          * receive interrupts. */
-        if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
+        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
             run->request_interrupt_window = 1;
         } else {
             run->request_interrupt_window = 0;
@@ -1791,24 +1837,24 @@ int kvm_arch_process_async_events(CPUState *cs)
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
 
-    if (env->interrupt_request & CPU_INTERRUPT_MCE) {
+    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
         assert(env->mcg_cap);
 
-        env->interrupt_request &= ~CPU_INTERRUPT_MCE;
+        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
 
         kvm_cpu_synchronize_state(env);
 
         if (env->exception_injected == EXCP08_DBLE) {
             /* this means triple fault */
             qemu_system_reset_request();
-            env->exit_request = 1;
+            cs->exit_request = 1;
             return 0;
         }
         env->exception_injected = EXCP12_MCHK;
         env->has_error_code = 0;
 
-        env->halted = 0;
+        cs->halted = 0;
         if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
             env->mp_state = KVM_MP_STATE_RUNNABLE;
         }
@@ -1818,50 +1864,53 @@ int kvm_arch_process_async_events(CPUState *cs)
         return 0;
     }
 
-    if (env->interrupt_request & CPU_INTERRUPT_POLL) {
-        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(env->apic_state);
     }
-    if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) ||
-        (env->interrupt_request & CPU_INTERRUPT_NMI)) {
-        env->halted = 0;
+        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cs->halted = 0;
     }
-    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
+    if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
         kvm_cpu_synchronize_state(env);
         do_cpu_init(cpu);
     }
-    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
         kvm_cpu_synchronize_state(env);
         do_cpu_sipi(cpu);
     }
-    if (env->interrupt_request & CPU_INTERRUPT_TPR) {
-        env->interrupt_request &= ~CPU_INTERRUPT_TPR;
+    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
         kvm_cpu_synchronize_state(env);
         apic_handle_tpr_access_report(env->apic_state, env->eip,
                                       env->tpr_access_type);
     }
 
-    return env->halted;
+    return cs->halted;
 }
 
 static int kvm_handle_halt(X86CPU *cpu)
 {
+    CPUState *cs = CPU(cpu);
     CPUX86State *env = &cpu->env;
 
-    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
           (env->eflags & IF_MASK)) &&
-        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
-        env->halted = 1;
+        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cs->halted = 1;
         return EXCP_HLT;
     }
 
     return 0;
 }
 
-static int kvm_handle_tpr_access(CPUX86State *env)
+static int kvm_handle_tpr_access(X86CPU *cpu)
 {
-    struct kvm_run *run = env->kvm_run;
+    CPUX86State *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+    struct kvm_run *run = cs->kvm_run;
 
     apic_handle_tpr_access_report(env->apic_state, run->tpr_access.rip,
                                   run->tpr_access.is_write ? TPR_ACCESS_WRITE
@@ -2067,7 +2116,6 @@ static bool host_supports_vmx(void)
 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
 {
     X86CPU *cpu = X86_CPU(cs);
-    CPUX86State *env = &cpu->env;
     uint64_t code;
     int ret;
 
@@ -2080,7 +2128,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         ret = 0;
         break;
     case KVM_EXIT_TPR_ACCESS:
-        ret = kvm_handle_tpr_access(env);
+        ret = kvm_handle_tpr_access(cpu);
         break;
     case KVM_EXIT_FAIL_ENTRY:
         code = run->fail_entry.hardware_entry_failure_reason;
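
A side note on the CPUID hunks above: the new KVM_MAX_CPUID_ENTRIES
checks all guard the same fixed-capacity append into
cpuid_data.entries. The pattern in isolation, with hypothetical names
(this is a standalone sketch, not QEMU code):

    /* guard.c -- sketch of the overflow guard the patch adds */
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ENTRIES 100                /* mirrors KVM_MAX_CPUID_ENTRIES */

    struct entry { unsigned function, index; };
    static struct entry entries[MAX_ENTRIES];
    static int next_entry;

    /* Check the guard before every post-increment append, as the
     * patch does before each "entries[cpuid_i++]", and fail loudly
     * rather than write past the end of the array. */
    static struct entry *alloc_entry(void)
    {
        if (next_entry == MAX_ENTRIES) {
            fprintf(stderr, "entry table is full\n");
            abort();
        }
        return &entries[next_entry++];
    }

    int main(void)
    {
        struct entry *e = alloc_entry();
        e->function = 0x80000000;
        e->index = 0;
        printf("used %d of %d entries\n", next_entry, MAX_ENTRIES);
        return 0;
    }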