*
*/
-#include <sys/types.h>
+#include "qemu/osdep.h"
+#include "qapi/error.h"
#include <sys/ioctl.h>
-#include <sys/mman.h>
#include <sys/utsname.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include "qemu-common.h"
+#include "cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
-#include "cpu.h"
#include "hyperv.h"
#include "exec/gdbstub.h"
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
+/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
+ * 255 kvm_msr_entry structs */
+#define MSR_BUF_SIZE 4096
+
#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
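
Aside (not part of the patch): a minimal standalone sketch of the
MSR_BUF_SIZE arithmetic in the comment above. The two structs are
stand-ins mirroring the layout assumed there (a u32 nmsrs + u32 pad
header followed by 16-byte entries, as in <linux/kvm.h>); they are
redeclared only so the sketch compiles without kernel headers.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct msr_entry {                /* mirrors struct kvm_msr_entry */
        uint32_t index;
        uint32_t reserved;
        uint64_t data;                /* 16 bytes total */
    };

    struct msr_buf {                  /* mirrors struct kvm_msrs */
        uint32_t nmsrs;
        uint32_t pad;                 /* 8-byte header */
        struct msr_entry entries[];
    };

    int main(void)
    {
        size_t capacity = (4096 - sizeof(struct msr_buf))
                          / sizeof(struct msr_entry);

        assert(sizeof(struct msr_buf) == 8);
        assert(sizeof(struct msr_entry) == 16);
        printf("a 4096-byte buffer holds %zu entries\n", capacity); /* 255 */
        return 0;
    }
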
return ret;
}
+ assert(ret == 1);
env->tsc = msr_data.entries[0].data;
return 0;
}
if ((env->mcg_cap & MCG_SER_P) && addr
&& (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
- if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
+ ram_addr = qemu_ram_addr_from_host(addr);
+ if (ram_addr == RAM_ADDR_INVALID ||
!kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!\n");
hwaddr paddr;
/* Hope we are lucky for AO MCE */
- if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
+ ram_addr = qemu_ram_addr_from_host(addr);
+ if (ram_addr == RAM_ADDR_INVALID ||
!kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
addr, &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
cpu->hyperv_stimer);
}
+static int kvm_arch_set_tsc_khz(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int r;
+
+ if (!env->tsc_khz) {
+ return 0;
+ }
+
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
+ kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
+ -ENOTSUP;
+ if (r < 0) {
+ /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
+ * TSC frequency doesn't match the one we want.
+ */
+ int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+ -ENOTSUP;
+ if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
+ error_report("warning: TSC frequency mismatch between "
+ "VM and host, and TSC scaling unavailable");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
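
Aside (not part of the patch): the rule stated by the comment in
kvm_arch_set_tsc_khz(), restated as a standalone truth table.
tsc_config_ok() is a hypothetical helper, not QEMU code; set_failed
models KVM_SET_TSC_KHZ failing, and cur_khz is what KVM_GET_TSC_KHZ
would report.

    #include <assert.h>
    #include <stdbool.h>

    static bool tsc_config_ok(bool set_failed, int cur_khz, int want_khz)
    {
        if (!set_failed) {
            return true;              /* TSC scaling applied the frequency */
        }
        /* No scaling: acceptable only if the host already runs at the
         * requested frequency. */
        return cur_khz > 0 && cur_khz == want_khz;
    }

    int main(void)
    {
        assert(tsc_config_ok(false, 0, 2500000));       /* scaled: OK */
        assert(tsc_config_ok(true, 2500000, 2500000));  /* lucky match: OK */
        assert(!tsc_config_ok(true, 2800000, 2500000)); /* mismatch: error */
        assert(!tsc_config_ok(true, -1, 2500000));      /* unknown: error */
        return 0;
    }
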
static Error *invtsc_mig_blocker;
#define KVM_MAX_CPUID_ENTRIES 100
if (cpu->hyperv_crash && has_msr_hv_crash) {
c->edx |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
}
+ c->edx |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
if (cpu->hyperv_reset && has_msr_hv_reset) {
c->eax |= HV_X64_MSR_RESET_AVAILABLE;
}
return r;
}
- r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL);
- if (r && env->tsc_khz) {
- r = kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz);
- if (r < 0) {
- fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
- return r;
+ r = kvm_arch_set_tsc_khz(cs);
+ if (r < 0) {
+ return r;
+ }
+
+    /* The vcpu's TSC frequency is either specified by the user, or, if
+     * unset, follows the value currently used by KVM. In the latter
+     * case, we query the value from KVM and record it in env->tsc_khz,
+     * so that the vcpu's TSC frequency can be migrated later via this
+     * field.
+     */
+ if (!env->tsc_khz) {
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+ -ENOTSUP;
+ if (r > 0) {
+ env->tsc_khz = r;
}
}
if (has_xsave) {
env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
}
+ cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
has_msr_mtrr = true;
}
+ if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
+ has_msr_tsc_aux = false;
+ }
return 0;
}
lhs->l = (flags >> DESC_L_SHIFT) & 1;
lhs->g = (flags & DESC_G_MASK) != 0;
lhs->avl = (flags & DESC_AVL_MASK) != 0;
- lhs->unusable = 0;
+ lhs->unusable = !lhs->present;
lhs->padding = 0;
}
lhs->selector = rhs->selector;
lhs->base = rhs->base;
lhs->limit = rhs->limit;
- lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
- (rhs->present * DESC_P_MASK) |
- (rhs->dpl << DESC_DPL_SHIFT) |
- (rhs->db << DESC_B_SHIFT) |
- (rhs->s * DESC_S_MASK) |
- (rhs->l << DESC_L_SHIFT) |
- (rhs->g * DESC_G_MASK) |
- (rhs->avl * DESC_AVL_MASK);
+ if (rhs->unusable) {
+ lhs->flags = 0;
+ } else {
+ lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+ (rhs->present * DESC_P_MASK) |
+ (rhs->dpl << DESC_DPL_SHIFT) |
+ (rhs->db << DESC_B_SHIFT) |
+ (rhs->s * DESC_S_MASK) |
+ (rhs->l << DESC_L_SHIFT) |
+ (rhs->g * DESC_G_MASK) |
+ (rhs->avl * DESC_AVL_MASK);
+ }
}
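
Aside (not part of the patch): the round-trip the two hunks above
establish, reduced to a standalone program. The DESC_* values are
copied from target-i386/cpu.h; the structs are simplified stand-ins
for SegmentCache and struct kvm_segment. A segment whose present bit
is clear is now marked unusable on the way into KVM, and an unusable
segment comes back from KVM with flags == 0.

    #include <assert.h>
    #include <stdint.h>

    #define DESC_TYPE_SHIFT 8
    #define DESC_S_MASK     (1u << 12)
    #define DESC_DPL_SHIFT  13
    #define DESC_P_MASK     (1u << 15)
    #define DESC_AVL_MASK   (1u << 20)
    #define DESC_L_SHIFT    21
    #define DESC_B_SHIFT    22
    #define DESC_G_MASK     (1u << 23)

    struct seg  { uint32_t flags; };                     /* ~SegmentCache */
    struct kseg {                                        /* ~kvm_segment */
        unsigned type, present, dpl, db, s, l, g, avl, unusable;
    };

    static void set_seg(struct kseg *lhs, const struct seg *rhs)
    {
        unsigned flags = rhs->flags;

        lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
        lhs->present = (flags & DESC_P_MASK) != 0;
        lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
        lhs->db = (flags >> DESC_B_SHIFT) & 1;
        lhs->s = (flags & DESC_S_MASK) != 0;
        lhs->l = (flags >> DESC_L_SHIFT) & 1;
        lhs->g = (flags & DESC_G_MASK) != 0;
        lhs->avl = (flags & DESC_AVL_MASK) != 0;
        lhs->unusable = !lhs->present;                   /* the fix above */
    }

    static void get_seg(struct seg *lhs, const struct kseg *rhs)
    {
        lhs->flags = rhs->unusable ? 0 :
            (rhs->type << DESC_TYPE_SHIFT) | (rhs->present * DESC_P_MASK) |
            (rhs->dpl << DESC_DPL_SHIFT) | (rhs->db << DESC_B_SHIFT) |
            (rhs->s * DESC_S_MASK) | (rhs->l << DESC_L_SHIFT) |
            (rhs->g * DESC_G_MASK) | (rhs->avl * DESC_AVL_MASK);
    }

    int main(void)
    {
        struct seg q = { .flags = 0 };    /* present bit clear */
        struct kseg k;

        set_seg(&k, &q);
        assert(k.unusable);               /* !present implies unusable */
        get_seg(&q, &k);
        assert(q.flags == 0);             /* round-trips as all-zero flags */
        return 0;
    }
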
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
}
memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
for (i = 0; i < CPU_NB_REGS; i++) {
- stq_p(&fpu.xmm[i][0], env->xmm_regs[i].XMM_Q(0));
- stq_p(&fpu.xmm[i][8], env->xmm_regs[i].XMM_Q(1));
+ stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
+ stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
}
fpu.mxcsr = env->mxcsr;
#define XSAVE_OPMASK 272
#define XSAVE_ZMM_Hi256 288
#define XSAVE_Hi16_ZMM 416
+#define XSAVE_PKRU 672
+
+#define XSAVE_BYTE_OFFSET(word_offset) \
+ ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))
+
+#define ASSERT_OFFSET(word_offset, field) \
+ QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
+ offsetof(X86XSaveArea, field))
+
+ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
+ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
+ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
+ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
+ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
+ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
+ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
+ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
+ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
+ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
+ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
+ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
+ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
+ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
+ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
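
Aside (not part of the patch): struct kvm_xsave's region[] is an array
of 32-bit words (__u32 region[1024], 4 KiB), so XSAVE_BYTE_OFFSET() is
simply word_offset * 4. A quick standalone check of a few of the
constants above against the fixed standard-format XSAVE layout that
X86XSaveArea mirrors:

    #include <assert.h>
    #include <stdint.h>

    struct kvm_xsave { uint32_t region[1024]; };   /* as in <linux/kvm.h> */

    #define XSAVE_BYTE_OFFSET(word_offset) \
        ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))

    int main(void)
    {
        assert(XSAVE_BYTE_OFFSET(128) == 512);  /* XSAVE_XSTATE_BV: header */
        assert(XSAVE_BYTE_OFFSET(144) == 576);  /* XSAVE_YMMH_SPACE: AVX */
        assert(XSAVE_BYTE_OFFSET(672) == 2688); /* XSAVE_PKRU */
        return 0;
    }
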
static int kvm_put_xsave(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- struct kvm_xsave* xsave = env->kvm_xsave_buf;
+ X86XSaveArea *xsave = env->kvm_xsave_buf;
uint16_t cwd, swd, twd;
- uint8_t *xmm, *ymmh, *zmmh;
int i, r;
if (!has_xsave) {
for (i = 0; i < 8; ++i) {
twd |= (!env->fptags[i]) << i;
}
- xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
- xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
- memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
- memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
- memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
+ xsave->legacy.fcw = cwd;
+ xsave->legacy.fsw = swd;
+ xsave->legacy.ftw = twd;
+ xsave->legacy.fpop = env->fpop;
+ xsave->legacy.fpip = env->fpip;
+ xsave->legacy.fpdp = env->fpdp;
+ memcpy(&xsave->legacy.fpregs, env->fpregs,
sizeof env->fpregs);
- xsave->region[XSAVE_MXCSR] = env->mxcsr;
- *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
- memcpy(&xsave->region[XSAVE_BNDREGS], env->bnd_regs,
+ xsave->legacy.mxcsr = env->mxcsr;
+ xsave->header.xstate_bv = env->xstate_bv;
+ memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
sizeof env->bnd_regs);
- memcpy(&xsave->region[XSAVE_BNDCSR], &env->bndcs_regs,
- sizeof(env->bndcs_regs));
- memcpy(&xsave->region[XSAVE_OPMASK], env->opmask_regs,
+ xsave->bndcsr_state.bndcsr = env->bndcs_regs;
+ memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
sizeof env->opmask_regs);
- xmm = (uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
- ymmh = (uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
- zmmh = (uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
- for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
- stq_p(xmm, env->xmm_regs[i].XMM_Q(0));
- stq_p(xmm+8, env->xmm_regs[i].XMM_Q(1));
- stq_p(ymmh, env->xmm_regs[i].XMM_Q(2));
- stq_p(ymmh+8, env->xmm_regs[i].XMM_Q(3));
- stq_p(zmmh, env->xmm_regs[i].XMM_Q(4));
- stq_p(zmmh+8, env->xmm_regs[i].XMM_Q(5));
- stq_p(zmmh+16, env->xmm_regs[i].XMM_Q(6));
- stq_p(zmmh+24, env->xmm_regs[i].XMM_Q(7));
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ uint8_t *xmm = xsave->legacy.xmm_regs[i];
+ uint8_t *ymmh = xsave->avx_state.ymmh[i];
+ uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+ stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
+ stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
+ stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
+ stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
+ stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
+ stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
+ stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
+ stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
}
#ifdef TARGET_X86_64
- memcpy(&xsave->region[XSAVE_Hi16_ZMM], &env->xmm_regs[16],
+ memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
16 * sizeof env->xmm_regs[16]);
+ memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
#endif
r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
return r;
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
-static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
- uint32_t index, uint64_t value)
+static void kvm_msr_buf_reset(X86CPU *cpu)
{
+ memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
+}
+
+static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
+{
+ struct kvm_msrs *msrs = cpu->kvm_msr_buf;
+ void *limit = ((void *)msrs) + MSR_BUF_SIZE;
+ struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
+
+ assert((void *)(entry + 1) <= limit);
+
entry->index = index;
entry->reserved = 0;
entry->data = value;
+ msrs->nmsrs++;
}
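
Aside (not part of the patch): the bounds check in kvm_msr_entry_add()
in isolation. With an 8-byte header and 16-byte entries, exactly 255
adds fit in the 4096-byte buffer; a 256th would step past `limit` and
trip the assert one entry before the overrun. (The patch does pointer
arithmetic on void *, a GCC extension QEMU builds with; this sketch
casts to char * so it is plain C.)

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct entry { uint32_t index, reserved; uint64_t data; };
    struct msrs  { uint32_t nmsrs, pad; struct entry entries[]; };

    #define BUF_SIZE 4096

    static void entry_add(struct msrs *msrs, uint32_t index, uint64_t value)
    {
        void *limit = (char *)msrs + BUF_SIZE;
        struct entry *e = &msrs->entries[msrs->nmsrs];

        assert((void *)(e + 1) <= limit);   /* fires on the 256th add */
        e->index = index;
        e->reserved = 0;
        e->data = value;
        msrs->nmsrs++;
    }

    int main(void)
    {
        struct msrs *m = calloc(1, BUF_SIZE);
        int i;

        for (i = 0; i < 255; i++) {
            entry_add(m, i, 0);             /* 255 adds fit exactly */
        }
        free(m);
        return 0;
    }
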
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[1];
- } msr_data;
- struct kvm_msr_entry *msrs = msr_data.entries;
+ int ret;
if (!has_msr_tsc_deadline) {
return 0;
}
- kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
+ kvm_msr_buf_reset(cpu);
+ kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
- msr_data.info = (struct kvm_msrs) {
- .nmsrs = 1,
- };
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+ if (ret < 0) {
+ return ret;
+ }
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ assert(ret == 1);
+ return 0;
}
/*
*/
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entry;
- } msr_data;
+ int ret;
+
+ if (!has_msr_feature_control) {
+ return 0;
+ }
- kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
+ kvm_msr_buf_reset(cpu);
+ kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL,
cpu->env.msr_ia32_feature_control);
- msr_data.info = (struct kvm_msrs) {
- .nmsrs = 1,
- };
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+ if (ret < 0) {
+ return ret;
+ }
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ assert(ret == 1);
+ return 0;
}
static int kvm_put_msrs(X86CPU *cpu, int level)
{
CPUX86State *env = &cpu->env;
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[150];
- } msr_data;
- struct kvm_msr_entry *msrs = msr_data.entries;
- int n = 0, i;
+ int i;
+ int ret;
+
+ kvm_msr_buf_reset(cpu);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
- kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+ kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
if (has_msr_star) {
- kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
+ kvm_msr_entry_add(cpu, MSR_STAR, env->star);
}
if (has_msr_hsave_pa) {
- kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
+ kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
}
if (has_msr_tsc_aux) {
- kvm_msr_entry_set(&msrs[n++], MSR_TSC_AUX, env->tsc_aux);
+ kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
}
if (has_msr_tsc_adjust) {
- kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
+ kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
}
if (has_msr_misc_enable) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
+ kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
env->msr_ia32_misc_enable);
}
if (has_msr_smbase) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SMBASE, env->smbase);
+ kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
}
if (has_msr_bndcfgs) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
+ kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
}
if (has_msr_xss) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_XSS, env->xss);
+ kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
}
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
- kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
- kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
- kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
- kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
+ kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
+ kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
+ kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
+ kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
}
#endif
/*
* for normal writeback. Limit them to reset or full state updates.
*/
if (level >= KVM_PUT_RESET_STATE) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
- env->system_time_msr);
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+ kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
if (has_msr_async_pf_en) {
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
- env->async_pf_en_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}
if (has_msr_pv_eoi_en) {
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
- env->pv_eoi_en_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
}
if (has_msr_kvm_steal_time) {
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_STEAL_TIME,
- env->steal_time_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
}
if (has_msr_architectural_pmu) {
/* Stop the counter. */
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
/* Set the counter values. */
for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR0 + i,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
env->msr_fixed_counters[i]);
}
for (i = 0; i < num_architectural_pmu_counters; i++) {
- kvm_msr_entry_set(&msrs[n++], MSR_P6_PERFCTR0 + i,
+ kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
env->msr_gp_counters[i]);
- kvm_msr_entry_set(&msrs[n++], MSR_P6_EVNTSEL0 + i,
+ kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
env->msr_gp_evtsel[i]);
}
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_STATUS,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
env->msr_global_status);
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
env->msr_global_ovf_ctrl);
/* Now start the PMU. */
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
env->msr_fixed_ctr_ctrl);
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
env->msr_global_ctrl);
}
if (has_msr_hv_hypercall) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
env->msr_hv_guest_os_id);
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
env->msr_hv_hypercall);
}
if (has_msr_hv_vapic) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
env->msr_hv_vapic);
}
if (has_msr_hv_tsc) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
- env->msr_hv_tsc);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
}
if (has_msr_hv_crash) {
int j;
for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_P0 + j,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
env->msr_hv_crash_params[j]);
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_CTL,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
HV_X64_MSR_CRASH_CTL_NOTIFY);
}
if (has_msr_hv_runtime) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
- env->msr_hv_runtime);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
}
if (cpu->hyperv_synic) {
int j;
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SCONTROL,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
env->msr_hv_synic_control);
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SVERSION,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
env->msr_hv_synic_version);
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIEFP,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
env->msr_hv_synic_evt_page);
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIMP,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
env->msr_hv_synic_msg_page);
for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SINT0 + j,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
env->msr_hv_synic_sint[j]);
}
}
int j;
for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_STIMER0_CONFIG + j*2,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
env->msr_hv_stimer_config[j]);
}
for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_STIMER0_COUNT + j*2,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
env->msr_hv_stimer_count[j]);
}
}
if (has_msr_mtrr) {
- kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
+ kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRphysBase(i), env->mtrr_var[i].base);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
+ env->mtrr_var[i].base);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i),
+ env->mtrr_var[i].mask);
}
}
if (env->mcg_cap) {
int i;
- kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
- kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
+ kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
+ kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
- kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
+ kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
}
}
- msr_data.info = (struct kvm_msrs) {
- .nmsrs = n,
- };
-
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+ if (ret < 0) {
+ return ret;
+ }
+ assert(ret == cpu->kvm_msr_buf->nmsrs);
+ return 0;
}
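
Aside (not part of the patch): the assert above is meaningful because
KVM_SET_MSRS returns the number of entries the kernel processed,
stopping at the first MSR it rejects. If that assert were ever to
fire, the offending entry could be reported like this, a hypothetical
helper sketched in QEMU's own idiom (it assumes the kvm_vcpu_ioctl()
and cpu->kvm_msr_buf declarations above, plus the stdio/inttypes
definitions that qemu/osdep.h pulls in):

    static int kvm_put_msrs_report(X86CPU *cpu)
    {
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);

        if (ret < 0) {
            return ret;                     /* the ioctl itself failed */
        }
        if (ret < cpu->kvm_msr_buf->nmsrs) {
            /* KVM stopped here: entry `ret` is the first rejected MSR */
            struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];

            fprintf(stderr, "failed to set MSR 0x%" PRIx32
                    " to 0x%" PRIx64 "\n", e->index, e->data);
            return -1;
        }
        return 0;
    }
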
}
memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
for (i = 0; i < CPU_NB_REGS; i++) {
- env->xmm_regs[i].XMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
- env->xmm_regs[i].XMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
}
env->mxcsr = fpu.mxcsr;
static int kvm_get_xsave(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- struct kvm_xsave* xsave = env->kvm_xsave_buf;
+ X86XSaveArea *xsave = env->kvm_xsave_buf;
int ret, i;
- const uint8_t *xmm, *ymmh, *zmmh;
uint16_t cwd, swd, twd;
if (!has_xsave) {
return ret;
}
- cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
- swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
- twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
- env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
+ cwd = xsave->legacy.fcw;
+ swd = xsave->legacy.fsw;
+ twd = xsave->legacy.ftw;
+ env->fpop = xsave->legacy.fpop;
env->fpstt = (swd >> 11) & 7;
env->fpus = swd;
env->fpuc = cwd;
for (i = 0; i < 8; ++i) {
env->fptags[i] = !((twd >> i) & 1);
}
- memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
- memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
- env->mxcsr = xsave->region[XSAVE_MXCSR];
- memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
+ env->fpip = xsave->legacy.fpip;
+ env->fpdp = xsave->legacy.fpdp;
+ env->mxcsr = xsave->legacy.mxcsr;
+ memcpy(env->fpregs, &xsave->legacy.fpregs,
sizeof env->fpregs);
- env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
- memcpy(env->bnd_regs, &xsave->region[XSAVE_BNDREGS],
+ env->xstate_bv = xsave->header.xstate_bv;
+ memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
sizeof env->bnd_regs);
- memcpy(&env->bndcs_regs, &xsave->region[XSAVE_BNDCSR],
- sizeof(env->bndcs_regs));
- memcpy(env->opmask_regs, &xsave->region[XSAVE_OPMASK],
+ env->bndcs_regs = xsave->bndcsr_state.bndcsr;
+ memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
sizeof env->opmask_regs);
- xmm = (const uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
- ymmh = (const uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
- zmmh = (const uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
- for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
- env->xmm_regs[i].XMM_Q(0) = ldq_p(xmm);
- env->xmm_regs[i].XMM_Q(1) = ldq_p(xmm+8);
- env->xmm_regs[i].XMM_Q(2) = ldq_p(ymmh);
- env->xmm_regs[i].XMM_Q(3) = ldq_p(ymmh+8);
- env->xmm_regs[i].XMM_Q(4) = ldq_p(zmmh);
- env->xmm_regs[i].XMM_Q(5) = ldq_p(zmmh+8);
- env->xmm_regs[i].XMM_Q(6) = ldq_p(zmmh+16);
- env->xmm_regs[i].XMM_Q(7) = ldq_p(zmmh+24);
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ uint8_t *xmm = xsave->legacy.xmm_regs[i];
+ uint8_t *ymmh = xsave->avx_state.ymmh[i];
+ uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
+ env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
+ env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
+ env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
+ env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
+ env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
+ env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
}
#ifdef TARGET_X86_64
- memcpy(&env->xmm_regs[16], &xsave->region[XSAVE_Hi16_ZMM],
+ memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
16 * sizeof env->xmm_regs[16]);
+ memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
#endif
return 0;
}
HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
- hflags = (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+ hflags = env->hflags & HFLAG_COPY_MASK;
+ hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
- hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
- (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ hflags |= HF_OSFXSR_MASK;
+ }
if (env->efer & MSR_EFER_LMA) {
hflags |= HF_LMA_MASK;
env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
}
}
- env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
+ env->hflags = hflags;
return 0;
}
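
Aside (not part of the patch): the hflags hunk above seeds the working
value with the preserved HFLAG_COPY_MASK bits up front instead of
merging them at the end. Both orderings give the same result as long
as the recomputed bits stay outside the copy mask, which a tiny check
can demonstrate (COPY_MASK here is an arbitrary stand-in value, not
the real HFLAG_COPY_MASK):

    #include <assert.h>
    #include <stdint.h>

    #define COPY_MASK 0xff000000u     /* stand-in for HFLAG_COPY_MASK */

    int main(void)
    {
        uint32_t old = 0xab001234u;   /* previous env->hflags */
        uint32_t computed = 0x00005678u;  /* bits recomputed each call */

        uint32_t merged_last = (old & COPY_MASK) | computed;  /* old style */
        uint32_t seeded = old & COPY_MASK;                    /* new style */
        seeded |= computed;

        assert(merged_last == seeded);   /* disjoint bits: same result */
        return 0;
    }
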
static int kvm_get_msrs(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[150];
- } msr_data;
- struct kvm_msr_entry *msrs = msr_data.entries;
- int ret, i, n;
-
- n = 0;
- msrs[n++].index = MSR_IA32_SYSENTER_CS;
- msrs[n++].index = MSR_IA32_SYSENTER_ESP;
- msrs[n++].index = MSR_IA32_SYSENTER_EIP;
- msrs[n++].index = MSR_PAT;
+ struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
+ int ret, i;
+
+ kvm_msr_buf_reset(cpu);
+
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
+ kvm_msr_entry_add(cpu, MSR_PAT, 0);
if (has_msr_star) {
- msrs[n++].index = MSR_STAR;
+ kvm_msr_entry_add(cpu, MSR_STAR, 0);
}
if (has_msr_hsave_pa) {
- msrs[n++].index = MSR_VM_HSAVE_PA;
+ kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
}
if (has_msr_tsc_aux) {
- msrs[n++].index = MSR_TSC_AUX;
+ kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
}
if (has_msr_tsc_adjust) {
- msrs[n++].index = MSR_TSC_ADJUST;
+ kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
}
if (has_msr_tsc_deadline) {
- msrs[n++].index = MSR_IA32_TSCDEADLINE;
+ kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
}
if (has_msr_misc_enable) {
- msrs[n++].index = MSR_IA32_MISC_ENABLE;
+ kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
}
if (has_msr_smbase) {
- msrs[n++].index = MSR_IA32_SMBASE;
+ kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
}
if (has_msr_feature_control) {
- msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
+ kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
}
if (has_msr_bndcfgs) {
- msrs[n++].index = MSR_IA32_BNDCFGS;
+ kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
}
if (has_msr_xss) {
- msrs[n++].index = MSR_IA32_XSS;
+ kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
}
if (!env->tsc_valid) {
- msrs[n++].index = MSR_IA32_TSC;
+ kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
env->tsc_valid = !runstate_is_running();
}
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
- msrs[n++].index = MSR_CSTAR;
- msrs[n++].index = MSR_KERNELGSBASE;
- msrs[n++].index = MSR_FMASK;
- msrs[n++].index = MSR_LSTAR;
+ kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
+ kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
+ kvm_msr_entry_add(cpu, MSR_FMASK, 0);
+ kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
}
#endif
- msrs[n++].index = MSR_KVM_SYSTEM_TIME;
- msrs[n++].index = MSR_KVM_WALL_CLOCK;
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
if (has_msr_async_pf_en) {
- msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
}
if (has_msr_pv_eoi_en) {
- msrs[n++].index = MSR_KVM_PV_EOI_EN;
+ kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
}
if (has_msr_kvm_steal_time) {
- msrs[n++].index = MSR_KVM_STEAL_TIME;
+ kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
}
if (has_msr_architectural_pmu) {
- msrs[n++].index = MSR_CORE_PERF_FIXED_CTR_CTRL;
- msrs[n++].index = MSR_CORE_PERF_GLOBAL_CTRL;
- msrs[n++].index = MSR_CORE_PERF_GLOBAL_STATUS;
- msrs[n++].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
- msrs[n++].index = MSR_CORE_PERF_FIXED_CTR0 + i;
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
}
for (i = 0; i < num_architectural_pmu_counters; i++) {
- msrs[n++].index = MSR_P6_PERFCTR0 + i;
- msrs[n++].index = MSR_P6_EVNTSEL0 + i;
+ kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
+ kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
}
}
if (env->mcg_cap) {
- msrs[n++].index = MSR_MCG_STATUS;
- msrs[n++].index = MSR_MCG_CTL;
+ kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
- msrs[n++].index = MSR_MC0_CTL + i;
+ kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
}
}
if (has_msr_hv_hypercall) {
- msrs[n++].index = HV_X64_MSR_HYPERCALL;
- msrs[n++].index = HV_X64_MSR_GUEST_OS_ID;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
}
if (has_msr_hv_vapic) {
- msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
}
if (has_msr_hv_tsc) {
- msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
}
if (has_msr_hv_crash) {
int j;
for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
- msrs[n++].index = HV_X64_MSR_CRASH_P0 + j;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
}
}
if (has_msr_hv_runtime) {
- msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
}
if (cpu->hyperv_synic) {
uint32_t msr;
- msrs[n++].index = HV_X64_MSR_SCONTROL;
- msrs[n++].index = HV_X64_MSR_SVERSION;
- msrs[n++].index = HV_X64_MSR_SIEFP;
- msrs[n++].index = HV_X64_MSR_SIMP;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
- msrs[n++].index = msr;
+ kvm_msr_entry_add(cpu, msr, 0);
}
}
if (has_msr_hv_stimer) {
for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
msr++) {
- msrs[n++].index = msr;
+ kvm_msr_entry_add(cpu, msr, 0);
}
}
if (has_msr_mtrr) {
- msrs[n++].index = MSR_MTRRdefType;
- msrs[n++].index = MSR_MTRRfix64K_00000;
- msrs[n++].index = MSR_MTRRfix16K_80000;
- msrs[n++].index = MSR_MTRRfix16K_A0000;
- msrs[n++].index = MSR_MTRRfix4K_C0000;
- msrs[n++].index = MSR_MTRRfix4K_C8000;
- msrs[n++].index = MSR_MTRRfix4K_D0000;
- msrs[n++].index = MSR_MTRRfix4K_D8000;
- msrs[n++].index = MSR_MTRRfix4K_E0000;
- msrs[n++].index = MSR_MTRRfix4K_E8000;
- msrs[n++].index = MSR_MTRRfix4K_F0000;
- msrs[n++].index = MSR_MTRRfix4K_F8000;
+ kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
- msrs[n++].index = MSR_MTRRphysBase(i);
- msrs[n++].index = MSR_MTRRphysMask(i);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
}
}
- msr_data.info = (struct kvm_msrs) {
- .nmsrs = n,
- };
-
- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
if (ret < 0) {
return ret;
}
+ assert(ret == cpu->kvm_msr_buf->nmsrs);
for (i = 0; i < ret; i++) {
uint32_t index = msrs[i].index;
switch (index) {
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
- if (level >= KVM_PUT_RESET_STATE && has_msr_feature_control) {
+ if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_msr_feature_control(x86_cpu);
if (ret < 0) {
return ret;
}
}
+ if (level == KVM_PUT_FULL_STATE) {
+ /* We don't check for kvm_arch_set_tsc_khz() errors here,
+ * because TSC frequency mismatch shouldn't abort migration,
+ * unless the user explicitly asked for a more strict TSC
+ * setting (e.g. using an explicit "tsc-freq" option).
+ */
+ kvm_arch_set_tsc_khz(cpu);
+ }
+
ret = kvm_getput_regs(x86_cpu, 1);
if (ret < 0) {
return ret;
ret = kvm_getput_regs(cpu, 0);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_xsave(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_xcrs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_sregs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_msrs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_mp_state(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_apic(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_vcpu_events(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_debugregs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
- return 0;
+ ret = 0;
+ out:
+ cpu_sync_bndcs_hflags(&cpu->env);
+ return ret;
}
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)