#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "exec/ioport.h"
-#include "hyperv.h"
+#include <asm/hyperv.h>
#include "hw/pci/pci.h"
//#define DEBUG_KVM
static bool has_msr_hsave_pa;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
+static bool has_msr_feature_control;
static bool has_msr_async_pf_en;
static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
    return cpu->env.cpuid_apic_id;
}
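+/* Not all kernel headers define KVM_CPUID_SIGNATURE_NEXT yet, so
+ * provide a fallback definition. */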
+#ifndef KVM_CPUID_SIGNATURE_NEXT
+#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
+#endif
+
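+/* Whether the guest OS ID / hypercall page MSRs need to be exposed:
+ * both the VAPIC assist page and PV spinlock hints are signalled
+ * through the Hyper-V synthetic MSR/hypercall interface. */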
+static bool hyperv_hypercall_available(X86CPU *cpu)
+{
+    return cpu->hyperv_vapic ||
+           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
+}
+
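+/* Any enabled Hyper-V feature, including relaxed timing alone,
+ * switches the leading CPUID signature from KVM to Hyper-V. */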
+static bool hyperv_enabled(X86CPU *cpu)
+{
+    return hyperv_hypercall_available(cpu) ||
+           cpu->hyperv_relaxed_timing;
+}
+
#define KVM_MAX_CPUID_ENTRIES 100
int kvm_arch_init_vcpu(CPUState *cs)
    uint32_t signature[3];
    int r;
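+    /* Zero the whole template once, instead of memset()ing every
+     * cpuid entry as it is filled in below. */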
+    memset(&cpuid_data, 0, sizeof(cpuid_data));
+
    cpuid_i = 0;
    /* Paravirtualization CPUIDs */
    c = &cpuid_data.entries[cpuid_i++];
-    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
-    if (!hyperv_enabled()) {
+    if (!hyperv_enabled(cpu)) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c->eax = 0;
    } else {
    c->edx = signature[2];
    c = &cpuid_data.entries[cpuid_i++];
-    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->features[FEAT_KVM];
-    if (hyperv_enabled()) {
+    if (hyperv_enabled(cpu)) {
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c = &cpuid_data.entries[cpuid_i++];
-        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;
        c = &cpuid_data.entries[cpuid_i++];
-        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_FEATURES;
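+        /* Both features below rely on the synthetic MSR interface,
+         * so each of them advertises the hypercall MSRs. */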
-        if (hyperv_relaxed_timing_enabled()) {
+        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        }
-        if (hyperv_vapic_recommended()) {
+        if (cpu->hyperv_vapic) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
        }
        c = &cpuid_data.entries[cpuid_i++];
-        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
-        if (hyperv_relaxed_timing_enabled()) {
+        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
-        if (hyperv_vapic_recommended()) {
+        if (cpu->hyperv_vapic) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
-        c->ebx = hyperv_get_spinlock_retries();
+        c->ebx = cpu->hyperv_spinlock_attempts;
        c = &cpuid_data.entries[cpuid_i++];
-        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;
        c = &cpuid_data.entries[cpuid_i++];
-        memset(c, 0, sizeof(*c));
        c->function = KVM_CPUID_SIGNATURE_NEXT;
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c->eax = 0;
    qemu_add_vm_change_state_handler(cpu_update_state, env);
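+    /* MSR_IA32_FEATURE_CONTROL is only present when VMX or SMX is
+     * advertised in CPUID leaf 1, ECX. */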
+    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
+    if (c) {
+        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
+                                  !!(c->ecx & CPUID_EXT_SMX);
+    }
+
-    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
    entry->data = value;
}
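+/*
+ * The TSC-deadline MSR holds an absolute guest TSC value, so it gets
+ * its own KVM_SET_MSRS call: kvm_arch_put_registers() issues it only
+ * after kvm_put_msrs() has restored the TSC itself, to avoid arming
+ * the timer against a stale TSC value.
+ */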
+static int kvm_put_tscdeadline_msr(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct {
+        struct kvm_msrs info;
+        struct kvm_msr_entry entries[1];
+    } msr_data;
+    struct kvm_msr_entry *msrs = msr_data.entries;
+
+    if (!has_msr_tsc_deadline) {
+        return 0;
+    }
+
+    kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
+
+    msr_data.info.nmsrs = 1;
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+}
+
static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
    }
-    if (has_msr_tsc_deadline) {
-        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
-    }
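+    /* MSR_IA32_TSCDEADLINE is written separately now; see
+     * kvm_put_tscdeadline_msr() above. */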
    if (has_msr_misc_enable) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
        kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
                          env->msr_global_ctrl);
    }
-        if (hyperv_hypercall_available()) {
+        if (hyperv_hypercall_available(cpu)) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID, 0);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL, 0);
        }
-        if (hyperv_vapic_recommended()) {
+        if (cpu->hyperv_vapic) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE, 0);
        }
-        kvm_msr_entry_set(&msrs[n++], MSR_IA32_FEATURE_CONTROL, env->msr_ia32_feature_control);
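+        /* The unconditional write failed on vCPUs without VMX/SMX,
+         * where KVM rejects this nonexistent MSR. */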
+        if (has_msr_feature_control) {
+            kvm_msr_entry_set(&msrs[n++], MSR_IA32_FEATURE_CONTROL,
+                              env->msr_ia32_feature_control);
+        }
    }
    if (env->mcg_cap) {
        int i;
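+    /* Look through every XCR returned by KVM_GET_XCRS, not just slot 0 */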
    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
-        if (xcrs.xcrs[0].xcr == 0) {
-            env->xcr0 = xcrs.xcrs[0].value;
+        if (xcrs.xcrs[i].xcr == 0) {
+            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    if (has_msr_misc_enable) {
        msrs[n++].index = MSR_IA32_MISC_ENABLE;
    }
-    msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
+    if (has_msr_feature_control) {
+        msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
+    }
    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
+            break;
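+            /* previously missing, so control fell through into the
+             * default case below */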
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
-        ret = kvm_update_guest_debug(env, reinject_trap);
+        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}
            return ret;
        }
    }
+
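+    /* Restore the TSC deadline only after kvm_put_msrs() has set the
+     * TSC itself. */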
+    ret = kvm_put_tscdeadline_msr(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;