4 * Copyright (C) 2006-2008 Qumranet Technologies
5 * Copyright IBM, Corp. 2008
8 * Anthony Liguori <aliguori@us.ibm.com>
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
15 #include "qemu/osdep.h"
16 #include "qapi/error.h"
17 #include <sys/ioctl.h>
18 #include <sys/utsname.h>
20 #include <linux/kvm.h>
21 #include "standard-headers/asm-x86/kvm_para.h"
24 #include "sysemu/sysemu.h"
25 #include "sysemu/hw_accel.h"
26 #include "sysemu/kvm_int.h"
29 #include "hyperv-proto.h"
31 #include "exec/gdbstub.h"
32 #include "qemu/host-utils.h"
33 #include "qemu/config-file.h"
34 #include "qemu/error-report.h"
35 #include "hw/i386/pc.h"
36 #include "hw/i386/apic.h"
37 #include "hw/i386/apic_internal.h"
38 #include "hw/i386/apic-msidef.h"
39 #include "hw/i386/intel_iommu.h"
40 #include "hw/i386/x86-iommu.h"
42 #include "hw/pci/pci.h"
43 #include "hw/pci/msi.h"
44 #include "hw/pci/msix.h"
45 #include "migration/blocker.h"
46 #include "exec/memattrs.h"
52 #define DPRINTF(fmt, ...) \
53 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
55 #define DPRINTF(fmt, ...) \
59 #define MSR_KVM_WALL_CLOCK 0x11
60 #define MSR_KVM_SYSTEM_TIME 0x12
62 /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
63 * 255 kvm_msr_entry structs */
64 #define MSR_BUF_SIZE 4096
66 const KVMCapabilityInfo kvm_arch_required_capabilities
[] = {
67 KVM_CAP_INFO(SET_TSS_ADDR
),
68 KVM_CAP_INFO(EXT_CPUID
),
69 KVM_CAP_INFO(MP_STATE
),
73 static bool has_msr_star
;
74 static bool has_msr_hsave_pa
;
75 static bool has_msr_tsc_aux
;
76 static bool has_msr_tsc_adjust
;
77 static bool has_msr_tsc_deadline
;
78 static bool has_msr_feature_control
;
79 static bool has_msr_misc_enable
;
80 static bool has_msr_smbase
;
81 static bool has_msr_bndcfgs
;
82 static int lm_capable_kernel
;
83 static bool has_msr_hv_hypercall
;
84 static bool has_msr_hv_crash
;
85 static bool has_msr_hv_reset
;
86 static bool has_msr_hv_vpindex
;
87 static bool hv_vpindex_settable
;
88 static bool has_msr_hv_runtime
;
89 static bool has_msr_hv_synic
;
90 static bool has_msr_hv_stimer
;
91 static bool has_msr_hv_frequencies
;
92 static bool has_msr_hv_reenlightenment
;
93 static bool has_msr_xss
;
94 static bool has_msr_spec_ctrl
;
95 static bool has_msr_virt_ssbd
;
96 static bool has_msr_smi_count
;
97 static bool has_msr_arch_capabs
;
98 static bool has_msr_core_capabs
;
100 static uint32_t has_architectural_pmu_version
;
101 static uint32_t num_architectural_pmu_gp_counters
;
102 static uint32_t num_architectural_pmu_fixed_counters
;
104 static int has_xsave
;
106 static int has_pit_state2
;
107 static int has_exception_payload
;
109 static bool has_msr_mcg_ext_ctl
;
111 static struct kvm_cpuid2
*cpuid_cache
;
112 static struct kvm_msr_list
*kvm_feature_msrs
;
114 int kvm_has_pit_state2(void)
116 return has_pit_state2
;
119 bool kvm_has_smm(void)
121 return kvm_check_extension(kvm_state
, KVM_CAP_X86_SMM
);
124 bool kvm_has_adjust_clock_stable(void)
126 int ret
= kvm_check_extension(kvm_state
, KVM_CAP_ADJUST_CLOCK
);
128 return (ret
== KVM_CLOCK_TSC_STABLE
);
131 bool kvm_allows_irq0_override(void)
133 return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
136 static bool kvm_x2apic_api_set_flags(uint64_t flags
)
138 KVMState
*s
= KVM_STATE(current_machine
->accelerator
);
140 return !kvm_vm_enable_cap(s
, KVM_CAP_X2APIC_API
, 0, flags
);
143 #define MEMORIZE(fn, _result) \
145 static bool _memorized; \
154 static bool has_x2apic_api
;
156 bool kvm_has_x2apic_api(void)
158 return has_x2apic_api
;
161 bool kvm_enable_x2apic(void)
164 kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS
|
165 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK
),
169 bool kvm_hv_vpindex_settable(void)
171 return hv_vpindex_settable
;
174 static int kvm_get_tsc(CPUState
*cs
)
176 X86CPU
*cpu
= X86_CPU(cs
);
177 CPUX86State
*env
= &cpu
->env
;
179 struct kvm_msrs info
;
180 struct kvm_msr_entry entries
[1];
184 if (env
->tsc_valid
) {
188 msr_data
.info
.nmsrs
= 1;
189 msr_data
.entries
[0].index
= MSR_IA32_TSC
;
190 env
->tsc_valid
= !runstate_is_running();
192 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_MSRS
, &msr_data
);
198 env
->tsc
= msr_data
.entries
[0].data
;
202 static inline void do_kvm_synchronize_tsc(CPUState
*cpu
, run_on_cpu_data arg
)
207 void kvm_synchronize_all_tsc(void)
213 run_on_cpu(cpu
, do_kvm_synchronize_tsc
, RUN_ON_CPU_NULL
);
218 static struct kvm_cpuid2
*try_get_cpuid(KVMState
*s
, int max
)
220 struct kvm_cpuid2
*cpuid
;
223 size
= sizeof(*cpuid
) + max
* sizeof(*cpuid
->entries
);
224 cpuid
= g_malloc0(size
);
226 r
= kvm_ioctl(s
, KVM_GET_SUPPORTED_CPUID
, cpuid
);
227 if (r
== 0 && cpuid
->nent
>= max
) {
235 fprintf(stderr
, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
243 /* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
246 static struct kvm_cpuid2
*get_supported_cpuid(KVMState
*s
)
248 struct kvm_cpuid2
*cpuid
;
251 if (cpuid_cache
!= NULL
) {
254 while ((cpuid
= try_get_cpuid(s
, max
)) == NULL
) {
261 static const struct kvm_para_features
{
264 } para_features
[] = {
265 { KVM_CAP_CLOCKSOURCE
, KVM_FEATURE_CLOCKSOURCE
},
266 { KVM_CAP_NOP_IO_DELAY
, KVM_FEATURE_NOP_IO_DELAY
},
267 { KVM_CAP_PV_MMU
, KVM_FEATURE_MMU_OP
},
268 { KVM_CAP_ASYNC_PF
, KVM_FEATURE_ASYNC_PF
},
271 static int get_para_features(KVMState
*s
)
275 for (i
= 0; i
< ARRAY_SIZE(para_features
); i
++) {
276 if (kvm_check_extension(s
, para_features
[i
].cap
)) {
277 features
|= (1 << para_features
[i
].feature
);
284 static bool host_tsx_blacklisted(void)
286 int family
, model
, stepping
;\
287 char vendor
[CPUID_VENDOR_SZ
+ 1];
289 host_vendor_fms(vendor
, &family
, &model
, &stepping
);
291 /* Check if we are running on a Haswell host known to have broken TSX */
292 return !strcmp(vendor
, CPUID_VENDOR_INTEL
) &&
294 ((model
== 63 && stepping
< 4) ||
295 model
== 60 || model
== 69 || model
== 70);
298 /* Returns the value for a specific register on the cpuid entry
300 static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2
*entry
, int reg
)
320 /* Find matching entry for function/index on kvm_cpuid2 struct
322 static struct kvm_cpuid_entry2
*cpuid_find_entry(struct kvm_cpuid2
*cpuid
,
327 for (i
= 0; i
< cpuid
->nent
; ++i
) {
328 if (cpuid
->entries
[i
].function
== function
&&
329 cpuid
->entries
[i
].index
== index
) {
330 return &cpuid
->entries
[i
];
337 uint32_t kvm_arch_get_supported_cpuid(KVMState
*s
, uint32_t function
,
338 uint32_t index
, int reg
)
340 struct kvm_cpuid2
*cpuid
;
342 uint32_t cpuid_1_edx
;
345 cpuid
= get_supported_cpuid(s
);
347 struct kvm_cpuid_entry2
*entry
= cpuid_find_entry(cpuid
, function
, index
);
350 ret
= cpuid_entry_get_reg(entry
, reg
);
353 /* Fixups for the data returned by KVM, below */
355 if (function
== 1 && reg
== R_EDX
) {
356 /* KVM before 2.6.30 misreports the following features */
357 ret
|= CPUID_MTRR
| CPUID_PAT
| CPUID_MCE
| CPUID_MCA
;
358 } else if (function
== 1 && reg
== R_ECX
) {
359 /* We can set the hypervisor flag, even if KVM does not return it on
360 * GET_SUPPORTED_CPUID
362 ret
|= CPUID_EXT_HYPERVISOR
;
363 /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
364 * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
365 * and the irqchip is in the kernel.
367 if (kvm_irqchip_in_kernel() &&
368 kvm_check_extension(s
, KVM_CAP_TSC_DEADLINE_TIMER
)) {
369 ret
|= CPUID_EXT_TSC_DEADLINE_TIMER
;
372 /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
373 * without the in-kernel irqchip
375 if (!kvm_irqchip_in_kernel()) {
376 ret
&= ~CPUID_EXT_X2APIC
;
380 int disable_exits
= kvm_check_extension(s
,
381 KVM_CAP_X86_DISABLE_EXITS
);
383 if (disable_exits
& KVM_X86_DISABLE_EXITS_MWAIT
) {
384 ret
|= CPUID_EXT_MONITOR
;
387 } else if (function
== 6 && reg
== R_EAX
) {
388 ret
|= CPUID_6_EAX_ARAT
; /* safe to allow because of emulated APIC */
389 } else if (function
== 7 && index
== 0 && reg
== R_EBX
) {
390 if (host_tsx_blacklisted()) {
391 ret
&= ~(CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_HLE
);
393 } else if (function
== 7 && index
== 0 && reg
== R_EDX
) {
395 * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
396 * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
397 * returned by KVM_GET_MSR_INDEX_LIST.
399 if (!has_msr_arch_capabs
) {
400 ret
&= ~CPUID_7_0_EDX_ARCH_CAPABILITIES
;
402 } else if (function
== 0x80000001 && reg
== R_ECX
) {
404 * It's safe to enable TOPOEXT even if it's not returned by
405 * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
406 * us to keep CPU models including TOPOEXT runnable on older kernels.
408 ret
|= CPUID_EXT3_TOPOEXT
;
409 } else if (function
== 0x80000001 && reg
== R_EDX
) {
410 /* On Intel, kvm returns cpuid according to the Intel spec,
411 * so add missing bits according to the AMD spec:
413 cpuid_1_edx
= kvm_arch_get_supported_cpuid(s
, 1, 0, R_EDX
);
414 ret
|= cpuid_1_edx
& CPUID_EXT2_AMD_ALIASES
;
415 } else if (function
== KVM_CPUID_FEATURES
&& reg
== R_EAX
) {
416 /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
417 * be enabled without the in-kernel irqchip
419 if (!kvm_irqchip_in_kernel()) {
420 ret
&= ~(1U << KVM_FEATURE_PV_UNHALT
);
422 } else if (function
== KVM_CPUID_FEATURES
&& reg
== R_EDX
) {
423 ret
|= 1U << KVM_HINTS_REALTIME
;
427 /* fallback for older kernels */
428 if ((function
== KVM_CPUID_FEATURES
) && !found
) {
429 ret
= get_para_features(s
);
435 uint32_t kvm_arch_get_supported_msr_feature(KVMState
*s
, uint32_t index
)
438 struct kvm_msrs info
;
439 struct kvm_msr_entry entries
[1];
443 if (kvm_feature_msrs
== NULL
) { /* Host doesn't support feature MSRs */
447 /* Check if requested MSR is supported feature MSR */
449 for (i
= 0; i
< kvm_feature_msrs
->nmsrs
; i
++)
450 if (kvm_feature_msrs
->indices
[i
] == index
) {
453 if (i
== kvm_feature_msrs
->nmsrs
) {
454 return 0; /* if the feature MSR is not supported, simply return 0 */
457 msr_data
.info
.nmsrs
= 1;
458 msr_data
.entries
[0].index
= index
;
460 ret
= kvm_ioctl(s
, KVM_GET_MSRS
, &msr_data
);
462 error_report("KVM get MSR (index=0x%x) feature failed, %s",
463 index
, strerror(-ret
));
467 return msr_data
.entries
[0].data
;
471 typedef struct HWPoisonPage
{
473 QLIST_ENTRY(HWPoisonPage
) list
;
476 static QLIST_HEAD(, HWPoisonPage
) hwpoison_page_list
=
477 QLIST_HEAD_INITIALIZER(hwpoison_page_list
);
479 static void kvm_unpoison_all(void *param
)
481 HWPoisonPage
*page
, *next_page
;
483 QLIST_FOREACH_SAFE(page
, &hwpoison_page_list
, list
, next_page
) {
484 QLIST_REMOVE(page
, list
);
485 qemu_ram_remap(page
->ram_addr
, TARGET_PAGE_SIZE
);
490 static void kvm_hwpoison_page_add(ram_addr_t ram_addr
)
494 QLIST_FOREACH(page
, &hwpoison_page_list
, list
) {
495 if (page
->ram_addr
== ram_addr
) {
499 page
= g_new(HWPoisonPage
, 1);
500 page
->ram_addr
= ram_addr
;
501 QLIST_INSERT_HEAD(&hwpoison_page_list
, page
, list
);
504 static int kvm_get_mce_cap_supported(KVMState
*s
, uint64_t *mce_cap
,
509 r
= kvm_check_extension(s
, KVM_CAP_MCE
);
512 return kvm_ioctl(s
, KVM_X86_GET_MCE_CAP_SUPPORTED
, mce_cap
);
517 static void kvm_mce_inject(X86CPU
*cpu
, hwaddr paddr
, int code
)
519 CPUState
*cs
= CPU(cpu
);
520 CPUX86State
*env
= &cpu
->env
;
521 uint64_t status
= MCI_STATUS_VAL
| MCI_STATUS_UC
| MCI_STATUS_EN
|
522 MCI_STATUS_MISCV
| MCI_STATUS_ADDRV
| MCI_STATUS_S
;
523 uint64_t mcg_status
= MCG_STATUS_MCIP
;
526 if (code
== BUS_MCEERR_AR
) {
527 status
|= MCI_STATUS_AR
| 0x134;
528 mcg_status
|= MCG_STATUS_EIPV
;
531 mcg_status
|= MCG_STATUS_RIPV
;
534 flags
= cpu_x86_support_mca_broadcast(env
) ? MCE_INJECT_BROADCAST
: 0;
535 /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
536 * guest kernel back into env->mcg_ext_ctl.
538 cpu_synchronize_state(cs
);
539 if (env
->mcg_ext_ctl
& MCG_EXT_CTL_LMCE_EN
) {
540 mcg_status
|= MCG_STATUS_LMCE
;
544 cpu_x86_inject_mce(NULL
, cpu
, 9, status
, mcg_status
, paddr
,
545 (MCM_ADDR_PHYS
<< 6) | 0xc, flags
);
548 static void hardware_memory_error(void)
550 fprintf(stderr
, "Hardware memory error!\n");
554 void kvm_arch_on_sigbus_vcpu(CPUState
*c
, int code
, void *addr
)
556 X86CPU
*cpu
= X86_CPU(c
);
557 CPUX86State
*env
= &cpu
->env
;
561 /* If we get an action required MCE, it has been injected by KVM
562 * while the VM was running. An action optional MCE instead should
563 * be coming from the main thread, which qemu_init_sigbus identifies
564 * as the "early kill" thread.
566 assert(code
== BUS_MCEERR_AR
|| code
== BUS_MCEERR_AO
);
568 if ((env
->mcg_cap
& MCG_SER_P
) && addr
) {
569 ram_addr
= qemu_ram_addr_from_host(addr
);
570 if (ram_addr
!= RAM_ADDR_INVALID
&&
571 kvm_physical_memory_addr_from_host(c
->kvm_state
, addr
, &paddr
)) {
572 kvm_hwpoison_page_add(ram_addr
);
573 kvm_mce_inject(cpu
, paddr
, code
);
577 fprintf(stderr
, "Hardware memory error for memory used by "
578 "QEMU itself instead of guest system!\n");
581 if (code
== BUS_MCEERR_AR
) {
582 hardware_memory_error();
585 /* Hope we are lucky for AO MCE */
588 static void kvm_reset_exception(CPUX86State
*env
)
590 env
->exception_nr
= -1;
591 env
->exception_pending
= 0;
592 env
->exception_injected
= 0;
593 env
->exception_has_payload
= false;
594 env
->exception_payload
= 0;
597 static void kvm_queue_exception(CPUX86State
*env
,
598 int32_t exception_nr
,
599 uint8_t exception_has_payload
,
600 uint64_t exception_payload
)
602 assert(env
->exception_nr
== -1);
603 assert(!env
->exception_pending
);
604 assert(!env
->exception_injected
);
605 assert(!env
->exception_has_payload
);
607 env
->exception_nr
= exception_nr
;
609 if (has_exception_payload
) {
610 env
->exception_pending
= 1;
612 env
->exception_has_payload
= exception_has_payload
;
613 env
->exception_payload
= exception_payload
;
615 env
->exception_injected
= 1;
617 if (exception_nr
== EXCP01_DB
) {
618 assert(exception_has_payload
);
619 env
->dr
[6] = exception_payload
;
620 } else if (exception_nr
== EXCP0E_PAGE
) {
621 assert(exception_has_payload
);
622 env
->cr
[2] = exception_payload
;
624 assert(!exception_has_payload
);
629 static int kvm_inject_mce_oldstyle(X86CPU
*cpu
)
631 CPUX86State
*env
= &cpu
->env
;
633 if (!kvm_has_vcpu_events() && env
->exception_nr
== EXCP12_MCHK
) {
634 unsigned int bank
, bank_num
= env
->mcg_cap
& 0xff;
635 struct kvm_x86_mce mce
;
637 kvm_reset_exception(env
);
640 * There must be at least one bank in use if an MCE is pending.
641 * Find it and use its values for the event injection.
643 for (bank
= 0; bank
< bank_num
; bank
++) {
644 if (env
->mce_banks
[bank
* 4 + 1] & MCI_STATUS_VAL
) {
648 assert(bank
< bank_num
);
651 mce
.status
= env
->mce_banks
[bank
* 4 + 1];
652 mce
.mcg_status
= env
->mcg_status
;
653 mce
.addr
= env
->mce_banks
[bank
* 4 + 2];
654 mce
.misc
= env
->mce_banks
[bank
* 4 + 3];
656 return kvm_vcpu_ioctl(CPU(cpu
), KVM_X86_SET_MCE
, &mce
);
661 static void cpu_update_state(void *opaque
, int running
, RunState state
)
663 CPUX86State
*env
= opaque
;
666 env
->tsc_valid
= false;
670 unsigned long kvm_arch_vcpu_id(CPUState
*cs
)
672 X86CPU
*cpu
= X86_CPU(cs
);
676 #ifndef KVM_CPUID_SIGNATURE_NEXT
677 #define KVM_CPUID_SIGNATURE_NEXT 0x40000100
680 static bool hyperv_enabled(X86CPU
*cpu
)
682 CPUState
*cs
= CPU(cpu
);
683 return kvm_check_extension(cs
->kvm_state
, KVM_CAP_HYPERV
) > 0 &&
684 ((cpu
->hyperv_spinlock_attempts
!= HYPERV_SPINLOCK_NEVER_RETRY
) ||
685 cpu
->hyperv_features
|| cpu
->hyperv_passthrough
);
688 static int kvm_arch_set_tsc_khz(CPUState
*cs
)
690 X86CPU
*cpu
= X86_CPU(cs
);
691 CPUX86State
*env
= &cpu
->env
;
698 r
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_TSC_CONTROL
) ?
699 kvm_vcpu_ioctl(cs
, KVM_SET_TSC_KHZ
, env
->tsc_khz
) :
702 /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
703 * TSC frequency doesn't match the one we want.
705 int cur_freq
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_GET_TSC_KHZ
) ?
706 kvm_vcpu_ioctl(cs
, KVM_GET_TSC_KHZ
) :
708 if (cur_freq
<= 0 || cur_freq
!= env
->tsc_khz
) {
709 warn_report("TSC frequency mismatch between "
710 "VM (%" PRId64
" kHz) and host (%d kHz), "
711 "and TSC scaling unavailable",
712 env
->tsc_khz
, cur_freq
);
720 static bool tsc_is_stable_and_known(CPUX86State
*env
)
725 return (env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
)
726 || env
->user_tsc_khz
;
735 uint64_t dependencies
;
736 } kvm_hyperv_properties
[] = {
737 [HYPERV_FEAT_RELAXED
] = {
738 .desc
= "relaxed timing (hv-relaxed)",
740 {.fw
= FEAT_HYPERV_EAX
,
741 .bits
= HV_HYPERCALL_AVAILABLE
},
742 {.fw
= FEAT_HV_RECOMM_EAX
,
743 .bits
= HV_RELAXED_TIMING_RECOMMENDED
}
746 [HYPERV_FEAT_VAPIC
] = {
747 .desc
= "virtual APIC (hv-vapic)",
749 {.fw
= FEAT_HYPERV_EAX
,
750 .bits
= HV_HYPERCALL_AVAILABLE
| HV_APIC_ACCESS_AVAILABLE
},
751 {.fw
= FEAT_HV_RECOMM_EAX
,
752 .bits
= HV_APIC_ACCESS_RECOMMENDED
}
755 [HYPERV_FEAT_TIME
] = {
756 .desc
= "clocksources (hv-time)",
758 {.fw
= FEAT_HYPERV_EAX
,
759 .bits
= HV_HYPERCALL_AVAILABLE
| HV_TIME_REF_COUNT_AVAILABLE
|
760 HV_REFERENCE_TSC_AVAILABLE
}
763 [HYPERV_FEAT_CRASH
] = {
764 .desc
= "crash MSRs (hv-crash)",
766 {.fw
= FEAT_HYPERV_EDX
,
767 .bits
= HV_GUEST_CRASH_MSR_AVAILABLE
}
770 [HYPERV_FEAT_RESET
] = {
771 .desc
= "reset MSR (hv-reset)",
773 {.fw
= FEAT_HYPERV_EAX
,
774 .bits
= HV_RESET_AVAILABLE
}
777 [HYPERV_FEAT_VPINDEX
] = {
778 .desc
= "VP_INDEX MSR (hv-vpindex)",
780 {.fw
= FEAT_HYPERV_EAX
,
781 .bits
= HV_VP_INDEX_AVAILABLE
}
784 [HYPERV_FEAT_RUNTIME
] = {
785 .desc
= "VP_RUNTIME MSR (hv-runtime)",
787 {.fw
= FEAT_HYPERV_EAX
,
788 .bits
= HV_VP_RUNTIME_AVAILABLE
}
791 [HYPERV_FEAT_SYNIC
] = {
792 .desc
= "synthetic interrupt controller (hv-synic)",
794 {.fw
= FEAT_HYPERV_EAX
,
795 .bits
= HV_SYNIC_AVAILABLE
}
798 [HYPERV_FEAT_STIMER
] = {
799 .desc
= "synthetic timers (hv-stimer)",
801 {.fw
= FEAT_HYPERV_EAX
,
802 .bits
= HV_SYNTIMERS_AVAILABLE
}
804 .dependencies
= BIT(HYPERV_FEAT_SYNIC
) | BIT(HYPERV_FEAT_TIME
)
806 [HYPERV_FEAT_FREQUENCIES
] = {
807 .desc
= "frequency MSRs (hv-frequencies)",
809 {.fw
= FEAT_HYPERV_EAX
,
810 .bits
= HV_ACCESS_FREQUENCY_MSRS
},
811 {.fw
= FEAT_HYPERV_EDX
,
812 .bits
= HV_FREQUENCY_MSRS_AVAILABLE
}
815 [HYPERV_FEAT_REENLIGHTENMENT
] = {
816 .desc
= "reenlightenment MSRs (hv-reenlightenment)",
818 {.fw
= FEAT_HYPERV_EAX
,
819 .bits
= HV_ACCESS_REENLIGHTENMENTS_CONTROL
}
822 [HYPERV_FEAT_TLBFLUSH
] = {
823 .desc
= "paravirtualized TLB flush (hv-tlbflush)",
825 {.fw
= FEAT_HV_RECOMM_EAX
,
826 .bits
= HV_REMOTE_TLB_FLUSH_RECOMMENDED
|
827 HV_EX_PROCESSOR_MASKS_RECOMMENDED
}
829 .dependencies
= BIT(HYPERV_FEAT_VPINDEX
)
831 [HYPERV_FEAT_EVMCS
] = {
832 .desc
= "enlightened VMCS (hv-evmcs)",
834 {.fw
= FEAT_HV_RECOMM_EAX
,
835 .bits
= HV_ENLIGHTENED_VMCS_RECOMMENDED
}
837 .dependencies
= BIT(HYPERV_FEAT_VAPIC
)
839 [HYPERV_FEAT_IPI
] = {
840 .desc
= "paravirtualized IPI (hv-ipi)",
842 {.fw
= FEAT_HV_RECOMM_EAX
,
843 .bits
= HV_CLUSTER_IPI_RECOMMENDED
|
844 HV_EX_PROCESSOR_MASKS_RECOMMENDED
}
846 .dependencies
= BIT(HYPERV_FEAT_VPINDEX
)
848 [HYPERV_FEAT_STIMER_DIRECT
] = {
849 .desc
= "direct mode synthetic timers (hv-stimer-direct)",
851 {.fw
= FEAT_HYPERV_EDX
,
852 .bits
= HV_STIMER_DIRECT_MODE_AVAILABLE
}
854 .dependencies
= BIT(HYPERV_FEAT_STIMER
)
858 static struct kvm_cpuid2
*try_get_hv_cpuid(CPUState
*cs
, int max
)
860 struct kvm_cpuid2
*cpuid
;
863 size
= sizeof(*cpuid
) + max
* sizeof(*cpuid
->entries
);
864 cpuid
= g_malloc0(size
);
867 r
= kvm_vcpu_ioctl(cs
, KVM_GET_SUPPORTED_HV_CPUID
, cpuid
);
868 if (r
== 0 && cpuid
->nent
>= max
) {
876 fprintf(stderr
, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
885 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
888 static struct kvm_cpuid2
*get_supported_hv_cpuid(CPUState
*cs
)
890 struct kvm_cpuid2
*cpuid
;
891 int max
= 7; /* 0x40000000..0x40000005, 0x4000000A */
894 * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
895 * -E2BIG, however, it doesn't report back the right size. Keep increasing
896 * it and re-trying until we succeed.
898 while ((cpuid
= try_get_hv_cpuid(cs
, max
)) == NULL
) {
905 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
906 * leaves from KVM_CAP_HYPERV* and present MSRs data.
908 static struct kvm_cpuid2
*get_supported_hv_cpuid_legacy(CPUState
*cs
)
910 X86CPU
*cpu
= X86_CPU(cs
);
911 struct kvm_cpuid2
*cpuid
;
912 struct kvm_cpuid_entry2
*entry_feat
, *entry_recomm
;
914 /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
915 cpuid
= g_malloc0(sizeof(*cpuid
) + 2 * sizeof(*cpuid
->entries
));
918 /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
919 entry_feat
= &cpuid
->entries
[0];
920 entry_feat
->function
= HV_CPUID_FEATURES
;
922 entry_recomm
= &cpuid
->entries
[1];
923 entry_recomm
->function
= HV_CPUID_ENLIGHTMENT_INFO
;
924 entry_recomm
->ebx
= cpu
->hyperv_spinlock_attempts
;
926 if (kvm_check_extension(cs
->kvm_state
, KVM_CAP_HYPERV
) > 0) {
927 entry_feat
->eax
|= HV_HYPERCALL_AVAILABLE
;
928 entry_feat
->eax
|= HV_APIC_ACCESS_AVAILABLE
;
929 entry_feat
->edx
|= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE
;
930 entry_recomm
->eax
|= HV_RELAXED_TIMING_RECOMMENDED
;
931 entry_recomm
->eax
|= HV_APIC_ACCESS_RECOMMENDED
;
934 if (kvm_check_extension(cs
->kvm_state
, KVM_CAP_HYPERV_TIME
) > 0) {
935 entry_feat
->eax
|= HV_TIME_REF_COUNT_AVAILABLE
;
936 entry_feat
->eax
|= HV_REFERENCE_TSC_AVAILABLE
;
939 if (has_msr_hv_frequencies
) {
940 entry_feat
->eax
|= HV_ACCESS_FREQUENCY_MSRS
;
941 entry_feat
->edx
|= HV_FREQUENCY_MSRS_AVAILABLE
;
944 if (has_msr_hv_crash
) {
945 entry_feat
->edx
|= HV_GUEST_CRASH_MSR_AVAILABLE
;
948 if (has_msr_hv_reenlightenment
) {
949 entry_feat
->eax
|= HV_ACCESS_REENLIGHTENMENTS_CONTROL
;
952 if (has_msr_hv_reset
) {
953 entry_feat
->eax
|= HV_RESET_AVAILABLE
;
956 if (has_msr_hv_vpindex
) {
957 entry_feat
->eax
|= HV_VP_INDEX_AVAILABLE
;
960 if (has_msr_hv_runtime
) {
961 entry_feat
->eax
|= HV_VP_RUNTIME_AVAILABLE
;
964 if (has_msr_hv_synic
) {
965 unsigned int cap
= cpu
->hyperv_synic_kvm_only
?
966 KVM_CAP_HYPERV_SYNIC
: KVM_CAP_HYPERV_SYNIC2
;
968 if (kvm_check_extension(cs
->kvm_state
, cap
) > 0) {
969 entry_feat
->eax
|= HV_SYNIC_AVAILABLE
;
973 if (has_msr_hv_stimer
) {
974 entry_feat
->eax
|= HV_SYNTIMERS_AVAILABLE
;
977 if (kvm_check_extension(cs
->kvm_state
,
978 KVM_CAP_HYPERV_TLBFLUSH
) > 0) {
979 entry_recomm
->eax
|= HV_REMOTE_TLB_FLUSH_RECOMMENDED
;
980 entry_recomm
->eax
|= HV_EX_PROCESSOR_MASKS_RECOMMENDED
;
983 if (kvm_check_extension(cs
->kvm_state
,
984 KVM_CAP_HYPERV_ENLIGHTENED_VMCS
) > 0) {
985 entry_recomm
->eax
|= HV_ENLIGHTENED_VMCS_RECOMMENDED
;
988 if (kvm_check_extension(cs
->kvm_state
,
989 KVM_CAP_HYPERV_SEND_IPI
) > 0) {
990 entry_recomm
->eax
|= HV_CLUSTER_IPI_RECOMMENDED
;
991 entry_recomm
->eax
|= HV_EX_PROCESSOR_MASKS_RECOMMENDED
;
997 static int hv_cpuid_get_fw(struct kvm_cpuid2
*cpuid
, int fw
, uint32_t *r
)
999 struct kvm_cpuid_entry2
*entry
;
1004 case FEAT_HYPERV_EAX
:
1006 func
= HV_CPUID_FEATURES
;
1008 case FEAT_HYPERV_EDX
:
1010 func
= HV_CPUID_FEATURES
;
1012 case FEAT_HV_RECOMM_EAX
:
1014 func
= HV_CPUID_ENLIGHTMENT_INFO
;
1020 entry
= cpuid_find_entry(cpuid
, func
, 0);
1039 static int hv_cpuid_check_and_set(CPUState
*cs
, struct kvm_cpuid2
*cpuid
,
1042 X86CPU
*cpu
= X86_CPU(cs
);
1043 CPUX86State
*env
= &cpu
->env
;
1044 uint32_t r
, fw
, bits
;
1048 if (!hyperv_feat_enabled(cpu
, feature
) && !cpu
->hyperv_passthrough
) {
1052 deps
= kvm_hyperv_properties
[feature
].dependencies
;
1054 dep_feat
= ctz64(deps
);
1055 if (!(hyperv_feat_enabled(cpu
, dep_feat
))) {
1057 "Hyper-V %s requires Hyper-V %s\n",
1058 kvm_hyperv_properties
[feature
].desc
,
1059 kvm_hyperv_properties
[dep_feat
].desc
);
1062 deps
&= ~(1ull << dep_feat
);
1065 for (i
= 0; i
< ARRAY_SIZE(kvm_hyperv_properties
[feature
].flags
); i
++) {
1066 fw
= kvm_hyperv_properties
[feature
].flags
[i
].fw
;
1067 bits
= kvm_hyperv_properties
[feature
].flags
[i
].bits
;
1073 if (hv_cpuid_get_fw(cpuid
, fw
, &r
) || (r
& bits
) != bits
) {
1074 if (hyperv_feat_enabled(cpu
, feature
)) {
1076 "Hyper-V %s is not supported by kernel\n",
1077 kvm_hyperv_properties
[feature
].desc
);
1084 env
->features
[fw
] |= bits
;
1087 if (cpu
->hyperv_passthrough
) {
1088 cpu
->hyperv_features
|= BIT(feature
);
1095 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent in
1096 * case of success, errno < 0 in case of failure and 0 when no Hyper-V
1097 * extentions are enabled.
1099 static int hyperv_handle_properties(CPUState
*cs
,
1100 struct kvm_cpuid_entry2
*cpuid_ent
)
1102 X86CPU
*cpu
= X86_CPU(cs
);
1103 CPUX86State
*env
= &cpu
->env
;
1104 struct kvm_cpuid2
*cpuid
;
1105 struct kvm_cpuid_entry2
*c
;
1106 uint32_t signature
[3];
1107 uint32_t cpuid_i
= 0;
1110 if (!hyperv_enabled(cpu
))
1113 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_EVMCS
) ||
1114 cpu
->hyperv_passthrough
) {
1115 uint16_t evmcs_version
;
1117 r
= kvm_vcpu_enable_cap(cs
, KVM_CAP_HYPERV_ENLIGHTENED_VMCS
, 0,
1118 (uintptr_t)&evmcs_version
);
1120 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_EVMCS
) && r
) {
1121 fprintf(stderr
, "Hyper-V %s is not supported by kernel\n",
1122 kvm_hyperv_properties
[HYPERV_FEAT_EVMCS
].desc
);
1127 env
->features
[FEAT_HV_RECOMM_EAX
] |=
1128 HV_ENLIGHTENED_VMCS_RECOMMENDED
;
1129 env
->features
[FEAT_HV_NESTED_EAX
] = evmcs_version
;
1133 if (kvm_check_extension(cs
->kvm_state
, KVM_CAP_HYPERV_CPUID
) > 0) {
1134 cpuid
= get_supported_hv_cpuid(cs
);
1136 cpuid
= get_supported_hv_cpuid_legacy(cs
);
1139 if (cpu
->hyperv_passthrough
) {
1140 memcpy(cpuid_ent
, &cpuid
->entries
[0],
1141 cpuid
->nent
* sizeof(cpuid
->entries
[0]));
1143 c
= cpuid_find_entry(cpuid
, HV_CPUID_FEATURES
, 0);
1145 env
->features
[FEAT_HYPERV_EAX
] = c
->eax
;
1146 env
->features
[FEAT_HYPERV_EBX
] = c
->ebx
;
1147 env
->features
[FEAT_HYPERV_EDX
] = c
->eax
;
1149 c
= cpuid_find_entry(cpuid
, HV_CPUID_ENLIGHTMENT_INFO
, 0);
1151 env
->features
[FEAT_HV_RECOMM_EAX
] = c
->eax
;
1153 /* hv-spinlocks may have been overriden */
1154 if (cpu
->hyperv_spinlock_attempts
!= HYPERV_SPINLOCK_NEVER_RETRY
) {
1155 c
->ebx
= cpu
->hyperv_spinlock_attempts
;
1158 c
= cpuid_find_entry(cpuid
, HV_CPUID_NESTED_FEATURES
, 0);
1160 env
->features
[FEAT_HV_NESTED_EAX
] = c
->eax
;
1165 r
= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_RELAXED
);
1166 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_VAPIC
);
1167 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_TIME
);
1168 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_CRASH
);
1169 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_RESET
);
1170 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_VPINDEX
);
1171 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_RUNTIME
);
1172 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_SYNIC
);
1173 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_STIMER
);
1174 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_FREQUENCIES
);
1175 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_REENLIGHTENMENT
);
1176 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_TLBFLUSH
);
1177 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_EVMCS
);
1178 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_IPI
);
1179 r
|= hv_cpuid_check_and_set(cs
, cpuid
, HYPERV_FEAT_STIMER_DIRECT
);
1181 /* Additional dependencies not covered by kvm_hyperv_properties[] */
1182 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_SYNIC
) &&
1183 !cpu
->hyperv_synic_kvm_only
&&
1184 !hyperv_feat_enabled(cpu
, HYPERV_FEAT_VPINDEX
)) {
1185 fprintf(stderr
, "Hyper-V %s requires Hyper-V %s\n",
1186 kvm_hyperv_properties
[HYPERV_FEAT_SYNIC
].desc
,
1187 kvm_hyperv_properties
[HYPERV_FEAT_VPINDEX
].desc
);
1191 /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
1192 env
->features
[FEAT_HYPERV_EDX
] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE
;
1199 if (cpu
->hyperv_passthrough
) {
1200 /* We already copied all feature words from KVM as is */
1205 c
= &cpuid_ent
[cpuid_i
++];
1206 c
->function
= HV_CPUID_VENDOR_AND_MAX_FUNCTIONS
;
1207 if (!cpu
->hyperv_vendor_id
) {
1208 memcpy(signature
, "Microsoft Hv", 12);
1210 size_t len
= strlen(cpu
->hyperv_vendor_id
);
1213 error_report("hv-vendor-id truncated to 12 characters");
1216 memset(signature
, 0, 12);
1217 memcpy(signature
, cpu
->hyperv_vendor_id
, len
);
1219 c
->eax
= hyperv_feat_enabled(cpu
, HYPERV_FEAT_EVMCS
) ?
1220 HV_CPUID_NESTED_FEATURES
: HV_CPUID_IMPLEMENT_LIMITS
;
1221 c
->ebx
= signature
[0];
1222 c
->ecx
= signature
[1];
1223 c
->edx
= signature
[2];
1225 c
= &cpuid_ent
[cpuid_i
++];
1226 c
->function
= HV_CPUID_INTERFACE
;
1227 memcpy(signature
, "Hv#1\0\0\0\0\0\0\0\0", 12);
1228 c
->eax
= signature
[0];
1233 c
= &cpuid_ent
[cpuid_i
++];
1234 c
->function
= HV_CPUID_VERSION
;
1235 c
->eax
= 0x00001bbc;
1236 c
->ebx
= 0x00060001;
1238 c
= &cpuid_ent
[cpuid_i
++];
1239 c
->function
= HV_CPUID_FEATURES
;
1240 c
->eax
= env
->features
[FEAT_HYPERV_EAX
];
1241 c
->ebx
= env
->features
[FEAT_HYPERV_EBX
];
1242 c
->edx
= env
->features
[FEAT_HYPERV_EDX
];
1244 c
= &cpuid_ent
[cpuid_i
++];
1245 c
->function
= HV_CPUID_ENLIGHTMENT_INFO
;
1246 c
->eax
= env
->features
[FEAT_HV_RECOMM_EAX
];
1247 c
->ebx
= cpu
->hyperv_spinlock_attempts
;
1249 c
= &cpuid_ent
[cpuid_i
++];
1250 c
->function
= HV_CPUID_IMPLEMENT_LIMITS
;
1251 c
->eax
= cpu
->hv_max_vps
;
1254 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_EVMCS
)) {
1257 /* Create zeroed 0x40000006..0x40000009 leaves */
1258 for (function
= HV_CPUID_IMPLEMENT_LIMITS
+ 1;
1259 function
< HV_CPUID_NESTED_FEATURES
; function
++) {
1260 c
= &cpuid_ent
[cpuid_i
++];
1261 c
->function
= function
;
1264 c
= &cpuid_ent
[cpuid_i
++];
1265 c
->function
= HV_CPUID_NESTED_FEATURES
;
1266 c
->eax
= env
->features
[FEAT_HV_NESTED_EAX
];
1276 static Error
*hv_passthrough_mig_blocker
;
1278 static int hyperv_init_vcpu(X86CPU
*cpu
)
1280 CPUState
*cs
= CPU(cpu
);
1281 Error
*local_err
= NULL
;
1284 if (cpu
->hyperv_passthrough
&& hv_passthrough_mig_blocker
== NULL
) {
1285 error_setg(&hv_passthrough_mig_blocker
,
1286 "'hv-passthrough' CPU flag prevents migration, use explicit"
1287 " set of hv-* flags instead");
1288 ret
= migrate_add_blocker(hv_passthrough_mig_blocker
, &local_err
);
1290 error_report_err(local_err
);
1291 error_free(hv_passthrough_mig_blocker
);
1296 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_VPINDEX
) && !hv_vpindex_settable
) {
1298 * the kernel doesn't support setting vp_index; assert that its value
1302 struct kvm_msrs info
;
1303 struct kvm_msr_entry entries
[1];
1306 .entries
[0].index
= HV_X64_MSR_VP_INDEX
,
1309 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_MSRS
, &msr_data
);
1315 if (msr_data
.entries
[0].data
!= hyperv_vp_index(CPU(cpu
))) {
1316 error_report("kernel's vp_index != QEMU's vp_index");
1321 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_SYNIC
)) {
1322 uint32_t synic_cap
= cpu
->hyperv_synic_kvm_only
?
1323 KVM_CAP_HYPERV_SYNIC
: KVM_CAP_HYPERV_SYNIC2
;
1324 ret
= kvm_vcpu_enable_cap(cs
, synic_cap
, 0);
1326 error_report("failed to turn on HyperV SynIC in KVM: %s",
1331 if (!cpu
->hyperv_synic_kvm_only
) {
1332 ret
= hyperv_x86_synic_add(cpu
);
1334 error_report("failed to create HyperV SynIC: %s",
1344 static Error
*invtsc_mig_blocker
;
1345 static Error
*nested_virt_mig_blocker
;
1347 #define KVM_MAX_CPUID_ENTRIES 100
1349 int kvm_arch_init_vcpu(CPUState
*cs
)
1352 struct kvm_cpuid2 cpuid
;
1353 struct kvm_cpuid_entry2 entries
[KVM_MAX_CPUID_ENTRIES
];
1356 * The kernel defines these structs with padding fields so there
1357 * should be no extra padding in our cpuid_data struct.
1359 QEMU_BUILD_BUG_ON(sizeof(cpuid_data
) !=
1360 sizeof(struct kvm_cpuid2
) +
1361 sizeof(struct kvm_cpuid_entry2
) * KVM_MAX_CPUID_ENTRIES
);
1363 X86CPU
*cpu
= X86_CPU(cs
);
1364 CPUX86State
*env
= &cpu
->env
;
1365 uint32_t limit
, i
, j
, cpuid_i
;
1367 struct kvm_cpuid_entry2
*c
;
1368 uint32_t signature
[3];
1369 int kvm_base
= KVM_CPUID_SIGNATURE
;
1370 int max_nested_state_len
;
1372 Error
*local_err
= NULL
;
1374 memset(&cpuid_data
, 0, sizeof(cpuid_data
));
1378 r
= kvm_arch_set_tsc_khz(cs
);
1383 /* vcpu's TSC frequency is either specified by user, or following
1384 * the value used by KVM if the former is not present. In the
1385 * latter case, we query it from KVM and record in env->tsc_khz,
1386 * so that vcpu's TSC frequency can be migrated later via this field.
1388 if (!env
->tsc_khz
) {
1389 r
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_GET_TSC_KHZ
) ?
1390 kvm_vcpu_ioctl(cs
, KVM_GET_TSC_KHZ
) :
1397 /* Paravirtualization CPUIDs */
1398 r
= hyperv_handle_properties(cs
, cpuid_data
.entries
);
1403 kvm_base
= KVM_CPUID_SIGNATURE_NEXT
;
1404 has_msr_hv_hypercall
= true;
1407 if (cpu
->expose_kvm
) {
1408 memcpy(signature
, "KVMKVMKVM\0\0\0", 12);
1409 c
= &cpuid_data
.entries
[cpuid_i
++];
1410 c
->function
= KVM_CPUID_SIGNATURE
| kvm_base
;
1411 c
->eax
= KVM_CPUID_FEATURES
| kvm_base
;
1412 c
->ebx
= signature
[0];
1413 c
->ecx
= signature
[1];
1414 c
->edx
= signature
[2];
1416 c
= &cpuid_data
.entries
[cpuid_i
++];
1417 c
->function
= KVM_CPUID_FEATURES
| kvm_base
;
1418 c
->eax
= env
->features
[FEAT_KVM
];
1419 c
->edx
= env
->features
[FEAT_KVM_HINTS
];
1422 cpu_x86_cpuid(env
, 0, 0, &limit
, &unused
, &unused
, &unused
);
1424 for (i
= 0; i
<= limit
; i
++) {
1425 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1426 fprintf(stderr
, "unsupported level value: 0x%x\n", limit
);
1429 c
= &cpuid_data
.entries
[cpuid_i
++];
1433 /* Keep reading function 2 till all the input is received */
1437 c
->flags
= KVM_CPUID_FLAG_STATEFUL_FUNC
|
1438 KVM_CPUID_FLAG_STATE_READ_NEXT
;
1439 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1440 times
= c
->eax
& 0xff;
1442 for (j
= 1; j
< times
; ++j
) {
1443 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1444 fprintf(stderr
, "cpuid_data is full, no space for "
1445 "cpuid(eax:2):eax & 0xf = 0x%x\n", times
);
1448 c
= &cpuid_data
.entries
[cpuid_i
++];
1450 c
->flags
= KVM_CPUID_FLAG_STATEFUL_FUNC
;
1451 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1456 if (env
->nr_dies
< 2) {
1462 for (j
= 0; ; j
++) {
1463 if (i
== 0xd && j
== 64) {
1467 if (i
== 0x1f && j
== 64) {
1472 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1474 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1476 if (i
== 4 && c
->eax
== 0) {
1479 if (i
== 0xb && !(c
->ecx
& 0xff00)) {
1482 if (i
== 0x1f && !(c
->ecx
& 0xff00)) {
1485 if (i
== 0xd && c
->eax
== 0) {
1488 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1489 fprintf(stderr
, "cpuid_data is full, no space for "
1490 "cpuid(eax:0x%x,ecx:0x%x)\n", i
, j
);
1493 c
= &cpuid_data
.entries
[cpuid_i
++];
1501 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1502 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1505 for (j
= 1; j
<= times
; ++j
) {
1506 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1507 fprintf(stderr
, "cpuid_data is full, no space for "
1508 "cpuid(eax:0x14,ecx:0x%x)\n", j
);
1511 c
= &cpuid_data
.entries
[cpuid_i
++];
1514 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1515 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1522 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1527 if (limit
>= 0x0a) {
1530 cpu_x86_cpuid(env
, 0x0a, 0, &eax
, &unused
, &unused
, &edx
);
1532 has_architectural_pmu_version
= eax
& 0xff;
1533 if (has_architectural_pmu_version
> 0) {
1534 num_architectural_pmu_gp_counters
= (eax
& 0xff00) >> 8;
1536 /* Shouldn't be more than 32, since that's the number of bits
1537 * available in EBX to tell us _which_ counters are available.
1540 if (num_architectural_pmu_gp_counters
> MAX_GP_COUNTERS
) {
1541 num_architectural_pmu_gp_counters
= MAX_GP_COUNTERS
;
1544 if (has_architectural_pmu_version
> 1) {
1545 num_architectural_pmu_fixed_counters
= edx
& 0x1f;
1547 if (num_architectural_pmu_fixed_counters
> MAX_FIXED_COUNTERS
) {
1548 num_architectural_pmu_fixed_counters
= MAX_FIXED_COUNTERS
;
1554 cpu_x86_cpuid(env
, 0x80000000, 0, &limit
, &unused
, &unused
, &unused
);
1556 for (i
= 0x80000000; i
<= limit
; i
++) {
1557 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1558 fprintf(stderr
, "unsupported xlevel value: 0x%x\n", limit
);
1561 c
= &cpuid_data
.entries
[cpuid_i
++];
1565 /* Query for all AMD cache information leaves */
1566 for (j
= 0; ; j
++) {
1568 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1570 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1575 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1576 fprintf(stderr
, "cpuid_data is full, no space for "
1577 "cpuid(eax:0x%x,ecx:0x%x)\n", i
, j
);
1580 c
= &cpuid_data
.entries
[cpuid_i
++];
1586 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1591 /* Call Centaur's CPUID instructions they are supported. */
1592 if (env
->cpuid_xlevel2
> 0) {
1593 cpu_x86_cpuid(env
, 0xC0000000, 0, &limit
, &unused
, &unused
, &unused
);
1595 for (i
= 0xC0000000; i
<= limit
; i
++) {
1596 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1597 fprintf(stderr
, "unsupported xlevel2 value: 0x%x\n", limit
);
1600 c
= &cpuid_data
.entries
[cpuid_i
++];
1604 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1608 cpuid_data
.cpuid
.nent
= cpuid_i
;
1610 if (((env
->cpuid_version
>> 8)&0xF) >= 6
1611 && (env
->features
[FEAT_1_EDX
] & (CPUID_MCE
| CPUID_MCA
)) ==
1612 (CPUID_MCE
| CPUID_MCA
)
1613 && kvm_check_extension(cs
->kvm_state
, KVM_CAP_MCE
) > 0) {
1614 uint64_t mcg_cap
, unsupported_caps
;
1618 ret
= kvm_get_mce_cap_supported(cs
->kvm_state
, &mcg_cap
, &banks
);
1620 fprintf(stderr
, "kvm_get_mce_cap_supported: %s", strerror(-ret
));
1624 if (banks
< (env
->mcg_cap
& MCG_CAP_BANKS_MASK
)) {
1625 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
1626 (int)(env
->mcg_cap
& MCG_CAP_BANKS_MASK
), banks
);
1630 unsupported_caps
= env
->mcg_cap
& ~(mcg_cap
| MCG_CAP_BANKS_MASK
);
1631 if (unsupported_caps
) {
1632 if (unsupported_caps
& MCG_LMCE_P
) {
1633 error_report("kvm: LMCE not supported");
1636 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64
,
1640 env
->mcg_cap
&= mcg_cap
| MCG_CAP_BANKS_MASK
;
1641 ret
= kvm_vcpu_ioctl(cs
, KVM_X86_SETUP_MCE
, &env
->mcg_cap
);
1643 fprintf(stderr
, "KVM_X86_SETUP_MCE: %s", strerror(-ret
));
1648 qemu_add_vm_change_state_handler(cpu_update_state
, env
);
1650 c
= cpuid_find_entry(&cpuid_data
.cpuid
, 1, 0);
1652 has_msr_feature_control
= !!(c
->ecx
& CPUID_EXT_VMX
) ||
1653 !!(c
->ecx
& CPUID_EXT_SMX
);
1656 if (cpu_has_vmx(env
) && !nested_virt_mig_blocker
&&
1657 ((kvm_max_nested_state_length() <= 0) || !has_exception_payload
)) {
1658 error_setg(&nested_virt_mig_blocker
,
1659 "Kernel do not provide required capabilities for "
1660 "nested virtualization migration. "
1661 "(CAP_NESTED_STATE=%d, CAP_EXCEPTION_PAYLOAD=%d)",
1662 kvm_max_nested_state_length() > 0,
1663 has_exception_payload
);
1664 r
= migrate_add_blocker(nested_virt_mig_blocker
, &local_err
);
1666 error_report_err(local_err
);
1667 error_free(nested_virt_mig_blocker
);
1672 if (env
->mcg_cap
& MCG_LMCE_P
) {
1673 has_msr_mcg_ext_ctl
= has_msr_feature_control
= true;
1676 if (!env
->user_tsc_khz
) {
1677 if ((env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
) &&
1678 invtsc_mig_blocker
== NULL
) {
1679 error_setg(&invtsc_mig_blocker
,
1680 "State blocked by non-migratable CPU device"
1682 r
= migrate_add_blocker(invtsc_mig_blocker
, &local_err
);
1684 error_report_err(local_err
);
1685 error_free(invtsc_mig_blocker
);
1691 if (cpu
->vmware_cpuid_freq
1692 /* Guests depend on 0x40000000 to detect this feature, so only expose
1693 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
1695 && kvm_base
== KVM_CPUID_SIGNATURE
1696 /* TSC clock must be stable and known for this feature. */
1697 && tsc_is_stable_and_known(env
)) {
1699 c
= &cpuid_data
.entries
[cpuid_i
++];
1700 c
->function
= KVM_CPUID_SIGNATURE
| 0x10;
1701 c
->eax
= env
->tsc_khz
;
1702 /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
1703 * APIC_BUS_CYCLE_NS */
1705 c
->ecx
= c
->edx
= 0;
1707 c
= cpuid_find_entry(&cpuid_data
.cpuid
, kvm_base
, 0);
1708 c
->eax
= MAX(c
->eax
, KVM_CPUID_SIGNATURE
| 0x10);
1711 cpuid_data
.cpuid
.nent
= cpuid_i
;
1713 cpuid_data
.cpuid
.padding
= 0;
1714 r
= kvm_vcpu_ioctl(cs
, KVM_SET_CPUID2
, &cpuid_data
);
1720 env
->xsave_buf
= qemu_memalign(4096, sizeof(struct kvm_xsave
));
1723 max_nested_state_len
= kvm_max_nested_state_length();
1724 if (max_nested_state_len
> 0) {
1725 assert(max_nested_state_len
>= offsetof(struct kvm_nested_state
, data
));
1726 env
->nested_state
= g_malloc0(max_nested_state_len
);
1728 env
->nested_state
->size
= max_nested_state_len
;
1730 if (IS_INTEL_CPU(env
)) {
1731 struct kvm_vmx_nested_state_hdr
*vmx_hdr
=
1732 &env
->nested_state
->hdr
.vmx
;
1734 env
->nested_state
->format
= KVM_STATE_NESTED_FORMAT_VMX
;
1735 vmx_hdr
->vmxon_pa
= -1ull;
1736 vmx_hdr
->vmcs12_pa
= -1ull;
1740 cpu
->kvm_msr_buf
= g_malloc0(MSR_BUF_SIZE
);
1742 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_RDTSCP
)) {
1743 has_msr_tsc_aux
= false;
1746 r
= hyperv_init_vcpu(cpu
);
1754 migrate_del_blocker(invtsc_mig_blocker
);
1756 migrate_del_blocker(nested_virt_mig_blocker
);
1761 int kvm_arch_destroy_vcpu(CPUState
*cs
)
1763 X86CPU
*cpu
= X86_CPU(cs
);
1764 CPUX86State
*env
= &cpu
->env
;
1766 if (cpu
->kvm_msr_buf
) {
1767 g_free(cpu
->kvm_msr_buf
);
1768 cpu
->kvm_msr_buf
= NULL
;
1771 if (env
->nested_state
) {
1772 g_free(env
->nested_state
);
1773 env
->nested_state
= NULL
;
1779 void kvm_arch_reset_vcpu(X86CPU
*cpu
)
1781 CPUX86State
*env
= &cpu
->env
;
1784 if (kvm_irqchip_in_kernel()) {
1785 env
->mp_state
= cpu_is_bsp(cpu
) ? KVM_MP_STATE_RUNNABLE
:
1786 KVM_MP_STATE_UNINITIALIZED
;
1788 env
->mp_state
= KVM_MP_STATE_RUNNABLE
;
1791 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_SYNIC
)) {
1793 for (i
= 0; i
< ARRAY_SIZE(env
->msr_hv_synic_sint
); i
++) {
1794 env
->msr_hv_synic_sint
[i
] = HV_SINT_MASKED
;
1797 hyperv_x86_synic_reset(cpu
);
1801 void kvm_arch_do_init_vcpu(X86CPU
*cpu
)
1803 CPUX86State
*env
= &cpu
->env
;
1805 /* APs get directly into wait-for-SIPI state. */
1806 if (env
->mp_state
== KVM_MP_STATE_UNINITIALIZED
) {
1807 env
->mp_state
= KVM_MP_STATE_INIT_RECEIVED
;
1811 static int kvm_get_supported_feature_msrs(KVMState
*s
)
1815 if (kvm_feature_msrs
!= NULL
) {
1819 if (!kvm_check_extension(s
, KVM_CAP_GET_MSR_FEATURES
)) {
1823 struct kvm_msr_list msr_list
;
1826 ret
= kvm_ioctl(s
, KVM_GET_MSR_FEATURE_INDEX_LIST
, &msr_list
);
1827 if (ret
< 0 && ret
!= -E2BIG
) {
1828 error_report("Fetch KVM feature MSR list failed: %s",
1833 assert(msr_list
.nmsrs
> 0);
1834 kvm_feature_msrs
= (struct kvm_msr_list
*) \
1835 g_malloc0(sizeof(msr_list
) +
1836 msr_list
.nmsrs
* sizeof(msr_list
.indices
[0]));
1838 kvm_feature_msrs
->nmsrs
= msr_list
.nmsrs
;
1839 ret
= kvm_ioctl(s
, KVM_GET_MSR_FEATURE_INDEX_LIST
, kvm_feature_msrs
);
1842 error_report("Fetch KVM feature MSR list failed: %s",
1844 g_free(kvm_feature_msrs
);
1845 kvm_feature_msrs
= NULL
;
1852 static int kvm_get_supported_msrs(KVMState
*s
)
1854 static int kvm_supported_msrs
;
1858 if (kvm_supported_msrs
== 0) {
1859 struct kvm_msr_list msr_list
, *kvm_msr_list
;
1861 kvm_supported_msrs
= -1;
1863 /* Obtain MSR list from KVM. These are the MSRs that we must
1866 ret
= kvm_ioctl(s
, KVM_GET_MSR_INDEX_LIST
, &msr_list
);
1867 if (ret
< 0 && ret
!= -E2BIG
) {
1870 /* Old kernel modules had a bug and could write beyond the provided
1871 memory. Allocate at least a safe amount of 1K. */
1872 kvm_msr_list
= g_malloc0(MAX(1024, sizeof(msr_list
) +
1874 sizeof(msr_list
.indices
[0])));
1876 kvm_msr_list
->nmsrs
= msr_list
.nmsrs
;
1877 ret
= kvm_ioctl(s
, KVM_GET_MSR_INDEX_LIST
, kvm_msr_list
);
1881 for (i
= 0; i
< kvm_msr_list
->nmsrs
; i
++) {
1882 switch (kvm_msr_list
->indices
[i
]) {
1884 has_msr_star
= true;
1886 case MSR_VM_HSAVE_PA
:
1887 has_msr_hsave_pa
= true;
1890 has_msr_tsc_aux
= true;
1892 case MSR_TSC_ADJUST
:
1893 has_msr_tsc_adjust
= true;
1895 case MSR_IA32_TSCDEADLINE
:
1896 has_msr_tsc_deadline
= true;
1898 case MSR_IA32_SMBASE
:
1899 has_msr_smbase
= true;
1902 has_msr_smi_count
= true;
1904 case MSR_IA32_MISC_ENABLE
:
1905 has_msr_misc_enable
= true;
1907 case MSR_IA32_BNDCFGS
:
1908 has_msr_bndcfgs
= true;
1913 case HV_X64_MSR_CRASH_CTL
:
1914 has_msr_hv_crash
= true;
1916 case HV_X64_MSR_RESET
:
1917 has_msr_hv_reset
= true;
1919 case HV_X64_MSR_VP_INDEX
:
1920 has_msr_hv_vpindex
= true;
1922 case HV_X64_MSR_VP_RUNTIME
:
1923 has_msr_hv_runtime
= true;
1925 case HV_X64_MSR_SCONTROL
:
1926 has_msr_hv_synic
= true;
1928 case HV_X64_MSR_STIMER0_CONFIG
:
1929 has_msr_hv_stimer
= true;
1931 case HV_X64_MSR_TSC_FREQUENCY
:
1932 has_msr_hv_frequencies
= true;
1934 case HV_X64_MSR_REENLIGHTENMENT_CONTROL
:
1935 has_msr_hv_reenlightenment
= true;
1937 case MSR_IA32_SPEC_CTRL
:
1938 has_msr_spec_ctrl
= true;
1941 has_msr_virt_ssbd
= true;
1943 case MSR_IA32_ARCH_CAPABILITIES
:
1944 has_msr_arch_capabs
= true;
1946 case MSR_IA32_CORE_CAPABILITY
:
1947 has_msr_core_capabs
= true;
1953 g_free(kvm_msr_list
);
1959 static Notifier smram_machine_done
;
1960 static KVMMemoryListener smram_listener
;
1961 static AddressSpace smram_address_space
;
1962 static MemoryRegion smram_as_root
;
1963 static MemoryRegion smram_as_mem
;
1965 static void register_smram_listener(Notifier
*n
, void *unused
)
1967 MemoryRegion
*smram
=
1968 (MemoryRegion
*) object_resolve_path("/machine/smram", NULL
);
1970 /* Outer container... */
1971 memory_region_init(&smram_as_root
, OBJECT(kvm_state
), "mem-container-smram", ~0ull);
1972 memory_region_set_enabled(&smram_as_root
, true);
1974 /* ... with two regions inside: normal system memory with low
1977 memory_region_init_alias(&smram_as_mem
, OBJECT(kvm_state
), "mem-smram",
1978 get_system_memory(), 0, ~0ull);
1979 memory_region_add_subregion_overlap(&smram_as_root
, 0, &smram_as_mem
, 0);
1980 memory_region_set_enabled(&smram_as_mem
, true);
1983 /* ... SMRAM with higher priority */
1984 memory_region_add_subregion_overlap(&smram_as_root
, 0, smram
, 10);
1985 memory_region_set_enabled(smram
, true);
1988 address_space_init(&smram_address_space
, &smram_as_root
, "KVM-SMRAM");
1989 kvm_memory_listener_register(kvm_state
, &smram_listener
,
1990 &smram_address_space
, 1);
1993 int kvm_arch_init(MachineState
*ms
, KVMState
*s
)
1995 uint64_t identity_base
= 0xfffbc000;
1996 uint64_t shadow_mem
;
1998 struct utsname utsname
;
2000 has_xsave
= kvm_check_extension(s
, KVM_CAP_XSAVE
);
2001 has_xcrs
= kvm_check_extension(s
, KVM_CAP_XCRS
);
2002 has_pit_state2
= kvm_check_extension(s
, KVM_CAP_PIT_STATE2
);
2004 hv_vpindex_settable
= kvm_check_extension(s
, KVM_CAP_HYPERV_VP_INDEX
);
2006 has_exception_payload
= kvm_check_extension(s
, KVM_CAP_EXCEPTION_PAYLOAD
);
2007 if (has_exception_payload
) {
2008 ret
= kvm_vm_enable_cap(s
, KVM_CAP_EXCEPTION_PAYLOAD
, 0, true);
2010 error_report("kvm: Failed to enable exception payload cap: %s",
2016 ret
= kvm_get_supported_msrs(s
);
2021 kvm_get_supported_feature_msrs(s
);
2024 lm_capable_kernel
= strcmp(utsname
.machine
, "x86_64") == 0;
2027 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
2028 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
2029 * Since these must be part of guest physical memory, we need to allocate
2030 * them, both by setting their start addresses in the kernel and by
2031 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
2033 * Older KVM versions may not support setting the identity map base. In
2034 * that case we need to stick with the default, i.e. a 256K maximum BIOS
2037 if (kvm_check_extension(s
, KVM_CAP_SET_IDENTITY_MAP_ADDR
)) {
2038 /* Allows up to 16M BIOSes. */
2039 identity_base
= 0xfeffc000;
2041 ret
= kvm_vm_ioctl(s
, KVM_SET_IDENTITY_MAP_ADDR
, &identity_base
);
2047 /* Set TSS base one page after EPT identity map. */
2048 ret
= kvm_vm_ioctl(s
, KVM_SET_TSS_ADDR
, identity_base
+ 0x1000);
2053 /* Tell fw_cfg to notify the BIOS to reserve the range. */
2054 ret
= e820_add_entry(identity_base
, 0x4000, E820_RESERVED
);
2056 fprintf(stderr
, "e820_add_entry() table is full\n");
2059 qemu_register_reset(kvm_unpoison_all
, NULL
);
2061 shadow_mem
= machine_kvm_shadow_mem(ms
);
2062 if (shadow_mem
!= -1) {
2064 ret
= kvm_vm_ioctl(s
, KVM_SET_NR_MMU_PAGES
, shadow_mem
);
2070 if (kvm_check_extension(s
, KVM_CAP_X86_SMM
) &&
2071 object_dynamic_cast(OBJECT(ms
), TYPE_PC_MACHINE
) &&
2072 pc_machine_is_smm_enabled(PC_MACHINE(ms
))) {
2073 smram_machine_done
.notify
= register_smram_listener
;
2074 qemu_add_machine_init_done_notifier(&smram_machine_done
);
2077 if (enable_cpu_pm
) {
2078 int disable_exits
= kvm_check_extension(s
, KVM_CAP_X86_DISABLE_EXITS
);
2081 /* Work around for kernel header with a typo. TODO: fix header and drop. */
2082 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
2083 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
2085 if (disable_exits
) {
2086 disable_exits
&= (KVM_X86_DISABLE_EXITS_MWAIT
|
2087 KVM_X86_DISABLE_EXITS_HLT
|
2088 KVM_X86_DISABLE_EXITS_PAUSE
);
2091 ret
= kvm_vm_enable_cap(s
, KVM_CAP_X86_DISABLE_EXITS
, 0,
2094 error_report("kvm: guest stopping CPU not supported: %s",
2102 static void set_v8086_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
2104 lhs
->selector
= rhs
->selector
;
2105 lhs
->base
= rhs
->base
;
2106 lhs
->limit
= rhs
->limit
;
2118 static void set_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
2120 unsigned flags
= rhs
->flags
;
2121 lhs
->selector
= rhs
->selector
;
2122 lhs
->base
= rhs
->base
;
2123 lhs
->limit
= rhs
->limit
;
2124 lhs
->type
= (flags
>> DESC_TYPE_SHIFT
) & 15;
2125 lhs
->present
= (flags
& DESC_P_MASK
) != 0;
2126 lhs
->dpl
= (flags
>> DESC_DPL_SHIFT
) & 3;
2127 lhs
->db
= (flags
>> DESC_B_SHIFT
) & 1;
2128 lhs
->s
= (flags
& DESC_S_MASK
) != 0;
2129 lhs
->l
= (flags
>> DESC_L_SHIFT
) & 1;
2130 lhs
->g
= (flags
& DESC_G_MASK
) != 0;
2131 lhs
->avl
= (flags
& DESC_AVL_MASK
) != 0;
2132 lhs
->unusable
= !lhs
->present
;
2136 static void get_seg(SegmentCache
*lhs
, const struct kvm_segment
*rhs
)
2138 lhs
->selector
= rhs
->selector
;
2139 lhs
->base
= rhs
->base
;
2140 lhs
->limit
= rhs
->limit
;
2141 lhs
->flags
= (rhs
->type
<< DESC_TYPE_SHIFT
) |
2142 ((rhs
->present
&& !rhs
->unusable
) * DESC_P_MASK
) |
2143 (rhs
->dpl
<< DESC_DPL_SHIFT
) |
2144 (rhs
->db
<< DESC_B_SHIFT
) |
2145 (rhs
->s
* DESC_S_MASK
) |
2146 (rhs
->l
<< DESC_L_SHIFT
) |
2147 (rhs
->g
* DESC_G_MASK
) |
2148 (rhs
->avl
* DESC_AVL_MASK
);
2151 static void kvm_getput_reg(__u64
*kvm_reg
, target_ulong
*qemu_reg
, int set
)
2154 *kvm_reg
= *qemu_reg
;
2156 *qemu_reg
= *kvm_reg
;
2160 static int kvm_getput_regs(X86CPU
*cpu
, int set
)
2162 CPUX86State
*env
= &cpu
->env
;
2163 struct kvm_regs regs
;
2167 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_REGS
, ®s
);
2173 kvm_getput_reg(®s
.rax
, &env
->regs
[R_EAX
], set
);
2174 kvm_getput_reg(®s
.rbx
, &env
->regs
[R_EBX
], set
);
2175 kvm_getput_reg(®s
.rcx
, &env
->regs
[R_ECX
], set
);
2176 kvm_getput_reg(®s
.rdx
, &env
->regs
[R_EDX
], set
);
2177 kvm_getput_reg(®s
.rsi
, &env
->regs
[R_ESI
], set
);
2178 kvm_getput_reg(®s
.rdi
, &env
->regs
[R_EDI
], set
);
2179 kvm_getput_reg(®s
.rsp
, &env
->regs
[R_ESP
], set
);
2180 kvm_getput_reg(®s
.rbp
, &env
->regs
[R_EBP
], set
);
2181 #ifdef TARGET_X86_64
2182 kvm_getput_reg(®s
.r8
, &env
->regs
[8], set
);
2183 kvm_getput_reg(®s
.r9
, &env
->regs
[9], set
);
2184 kvm_getput_reg(®s
.r10
, &env
->regs
[10], set
);
2185 kvm_getput_reg(®s
.r11
, &env
->regs
[11], set
);
2186 kvm_getput_reg(®s
.r12
, &env
->regs
[12], set
);
2187 kvm_getput_reg(®s
.r13
, &env
->regs
[13], set
);
2188 kvm_getput_reg(®s
.r14
, &env
->regs
[14], set
);
2189 kvm_getput_reg(®s
.r15
, &env
->regs
[15], set
);
2192 kvm_getput_reg(®s
.rflags
, &env
->eflags
, set
);
2193 kvm_getput_reg(®s
.rip
, &env
->eip
, set
);
2196 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_REGS
, ®s
);
2202 static int kvm_put_fpu(X86CPU
*cpu
)
2204 CPUX86State
*env
= &cpu
->env
;
2208 memset(&fpu
, 0, sizeof fpu
);
2209 fpu
.fsw
= env
->fpus
& ~(7 << 11);
2210 fpu
.fsw
|= (env
->fpstt
& 7) << 11;
2211 fpu
.fcw
= env
->fpuc
;
2212 fpu
.last_opcode
= env
->fpop
;
2213 fpu
.last_ip
= env
->fpip
;
2214 fpu
.last_dp
= env
->fpdp
;
2215 for (i
= 0; i
< 8; ++i
) {
2216 fpu
.ftwx
|= (!env
->fptags
[i
]) << i
;
2218 memcpy(fpu
.fpr
, env
->fpregs
, sizeof env
->fpregs
);
2219 for (i
= 0; i
< CPU_NB_REGS
; i
++) {
2220 stq_p(&fpu
.xmm
[i
][0], env
->xmm_regs
[i
].ZMM_Q(0));
2221 stq_p(&fpu
.xmm
[i
][8], env
->xmm_regs
[i
].ZMM_Q(1));
2223 fpu
.mxcsr
= env
->mxcsr
;
2225 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_FPU
, &fpu
);
2228 #define XSAVE_FCW_FSW 0
2229 #define XSAVE_FTW_FOP 1
2230 #define XSAVE_CWD_RIP 2
2231 #define XSAVE_CWD_RDP 4
2232 #define XSAVE_MXCSR 6
2233 #define XSAVE_ST_SPACE 8
2234 #define XSAVE_XMM_SPACE 40
2235 #define XSAVE_XSTATE_BV 128
2236 #define XSAVE_YMMH_SPACE 144
2237 #define XSAVE_BNDREGS 240
2238 #define XSAVE_BNDCSR 256
2239 #define XSAVE_OPMASK 272
2240 #define XSAVE_ZMM_Hi256 288
2241 #define XSAVE_Hi16_ZMM 416
2242 #define XSAVE_PKRU 672
2244 #define XSAVE_BYTE_OFFSET(word_offset) \
2245 ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))
2247 #define ASSERT_OFFSET(word_offset, field) \
2248 QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
2249 offsetof(X86XSaveArea, field))
2251 ASSERT_OFFSET(XSAVE_FCW_FSW
, legacy
.fcw
);
2252 ASSERT_OFFSET(XSAVE_FTW_FOP
, legacy
.ftw
);
2253 ASSERT_OFFSET(XSAVE_CWD_RIP
, legacy
.fpip
);
2254 ASSERT_OFFSET(XSAVE_CWD_RDP
, legacy
.fpdp
);
2255 ASSERT_OFFSET(XSAVE_MXCSR
, legacy
.mxcsr
);
2256 ASSERT_OFFSET(XSAVE_ST_SPACE
, legacy
.fpregs
);
2257 ASSERT_OFFSET(XSAVE_XMM_SPACE
, legacy
.xmm_regs
);
2258 ASSERT_OFFSET(XSAVE_XSTATE_BV
, header
.xstate_bv
);
2259 ASSERT_OFFSET(XSAVE_YMMH_SPACE
, avx_state
);
2260 ASSERT_OFFSET(XSAVE_BNDREGS
, bndreg_state
);
2261 ASSERT_OFFSET(XSAVE_BNDCSR
, bndcsr_state
);
2262 ASSERT_OFFSET(XSAVE_OPMASK
, opmask_state
);
2263 ASSERT_OFFSET(XSAVE_ZMM_Hi256
, zmm_hi256_state
);
2264 ASSERT_OFFSET(XSAVE_Hi16_ZMM
, hi16_zmm_state
);
2265 ASSERT_OFFSET(XSAVE_PKRU
, pkru_state
);
2267 static int kvm_put_xsave(X86CPU
*cpu
)
2269 CPUX86State
*env
= &cpu
->env
;
2270 X86XSaveArea
*xsave
= env
->xsave_buf
;
2273 return kvm_put_fpu(cpu
);
2275 x86_cpu_xsave_all_areas(cpu
, xsave
);
2277 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_XSAVE
, xsave
);
2280 static int kvm_put_xcrs(X86CPU
*cpu
)
2282 CPUX86State
*env
= &cpu
->env
;
2283 struct kvm_xcrs xcrs
= {};
2291 xcrs
.xcrs
[0].xcr
= 0;
2292 xcrs
.xcrs
[0].value
= env
->xcr0
;
2293 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_XCRS
, &xcrs
);
2296 static int kvm_put_sregs(X86CPU
*cpu
)
2298 CPUX86State
*env
= &cpu
->env
;
2299 struct kvm_sregs sregs
;
2301 memset(sregs
.interrupt_bitmap
, 0, sizeof(sregs
.interrupt_bitmap
));
2302 if (env
->interrupt_injected
>= 0) {
2303 sregs
.interrupt_bitmap
[env
->interrupt_injected
/ 64] |=
2304 (uint64_t)1 << (env
->interrupt_injected
% 64);
2307 if ((env
->eflags
& VM_MASK
)) {
2308 set_v8086_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
2309 set_v8086_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
2310 set_v8086_seg(&sregs
.es
, &env
->segs
[R_ES
]);
2311 set_v8086_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
2312 set_v8086_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
2313 set_v8086_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
2315 set_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
2316 set_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
2317 set_seg(&sregs
.es
, &env
->segs
[R_ES
]);
2318 set_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
2319 set_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
2320 set_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
2323 set_seg(&sregs
.tr
, &env
->tr
);
2324 set_seg(&sregs
.ldt
, &env
->ldt
);
2326 sregs
.idt
.limit
= env
->idt
.limit
;
2327 sregs
.idt
.base
= env
->idt
.base
;
2328 memset(sregs
.idt
.padding
, 0, sizeof sregs
.idt
.padding
);
2329 sregs
.gdt
.limit
= env
->gdt
.limit
;
2330 sregs
.gdt
.base
= env
->gdt
.base
;
2331 memset(sregs
.gdt
.padding
, 0, sizeof sregs
.gdt
.padding
);
2333 sregs
.cr0
= env
->cr
[0];
2334 sregs
.cr2
= env
->cr
[2];
2335 sregs
.cr3
= env
->cr
[3];
2336 sregs
.cr4
= env
->cr
[4];
2338 sregs
.cr8
= cpu_get_apic_tpr(cpu
->apic_state
);
2339 sregs
.apic_base
= cpu_get_apic_base(cpu
->apic_state
);
2341 sregs
.efer
= env
->efer
;
2343 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_SREGS
, &sregs
);
2346 static void kvm_msr_buf_reset(X86CPU
*cpu
)
2348 memset(cpu
->kvm_msr_buf
, 0, MSR_BUF_SIZE
);
static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
{
    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];

    assert((void *)(entry + 1) <= limit);

    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
    msrs->nmsrs++;
}
static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
{
    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, index, value);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
{
    int ret;

    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
    assert(ret == 1);
}
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int ret;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}
/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    int ret;

    if (!has_msr_feature_control) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
                          cpu->env.msr_ia32_feature_control);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}
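/*
 * Write the bulk of the guest MSR state with a single KVM_SET_MSRS call.
 * Optional MSRs are only included when the corresponding has_msr_* flag
 * was detected at init time; MSRs with guest-visible side effects (TSC,
 * kvmclock, PMU, Hyper-V, MTRR, Intel PT) are limited to reset or full
 * state updates, as selected by 'level'.
 */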
static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    int i;
    int ret;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
        kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
    }
#endif

    /* If host supports feature MSR, write down. */
    if (has_msr_arch_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
                          env->features[FEAT_ARCH_CAPABILITIES]);
    }

    if (has_msr_core_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
                          env->features[FEAT_CORE_CAPABILITY]);
    }

    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
        kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
            kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
            kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
        }
        if (has_architectural_pmu_version > 0) {
            if (has_architectural_pmu_version > 1) {
                /* Stop the counter.  */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            }

            /* Set the counter values.  */
            for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            if (has_architectural_pmu_version > 1) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
                                  env->msr_global_status);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                                  env->msr_global_ovf_ctrl);

                /* Now start the PMU.  */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
                                  env->msr_fixed_ctr_ctrl);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                                  env->msr_global_ctrl);
            }
        }
        /*
         * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
         * only sync them to KVM on the first cpu
         */
        if (current_cpu == first_cpu) {
            if (has_msr_hv_hypercall) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
                                  env->msr_hv_guest_os_id);
                kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
                                  env->msr_hv_hypercall);
            }
            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
                                  env->msr_hv_tsc);
            }
            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
                                  env->msr_hv_reenlightenment_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
                                  env->msr_hv_tsc_emulation_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
                                  env->msr_hv_tsc_emulation_status);
            }
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_CRASH_PARAMS; j++)
                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);

            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
            && hv_vpindex_settable) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
                              hyperv_vp_index(CPU(cpu)));
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
            int j;

            kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);

            kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
                              env->msr_hv_synic_control);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
                              env->msr_hv_synic_evt_page);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
                              env->msr_hv_synic_msg_page);

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
                                  env->msr_hv_synic_sint[j]);
            }
        }
        if (has_msr_hv_stimer) {
            int j;

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
                                  env->msr_hv_stimer_config[j]);
            }

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
                                  env->msr_hv_stimer_count[j]);
            }
        }
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);

            kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                /* The CPU GPs if we write to a bit above the physical limit of
                 * the host CPU (and KVM emulates that)
                 */
                uint64_t mask = env->mtrr_var[i].mask;
                mask &= phys_mask;

                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
                                  env->mtrr_var[i].base);
                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
            }
        }
        if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
            int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
                                                        0x14, 1, R_EAX) & 0x7;

            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
                              env->msr_rtit_ctrl);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
                              env->msr_rtit_status);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
                              env->msr_rtit_output_base);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
                              env->msr_rtit_output_mask);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
                              env->msr_rtit_cr3_match);
            for (i = 0; i < addr_num; i++) {
                kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
                                  env->msr_rtit_addrs[i]);
            }
        }

        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control. */
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
                     (uint32_t)e->index, (uint64_t)e->data);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    return 0;
}
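/*
 * Read the FPU state back through the legacy KVM_GET_FPU interface; this
 * path is only taken when the host lacks XSAVE (see kvm_get_xsave below).
 */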
static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->xsave_buf;
    int ret;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }
    x86_cpu_xrstor_all_areas(cpu, xsave);

    return 0;
}
static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}
static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
    x86_update_hflags(env);

    return 0;
}
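/*
 * Mirror image of kvm_put_msrs(): request the same MSR list with zeroed
 * values, issue one KVM_GET_MSRS call, then scatter the returned values
 * back into CPUX86State in the switch statement below.
 */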
static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i;
    uint64_t mtrr_top_bits;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
    kvm_msr_entry_add(cpu, MSR_PAT, 0);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, 0);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
    }
    if (has_msr_feature_control) {
        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
    }
    if (!env->tsc_valid) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
    }
#endif
    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (has_architectural_pmu_version > 0) {
        if (has_architectural_pmu_version > 1) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        }
        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
        int addr_num =
            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;

        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
        for (i = 0; i < addr_num; i++) {
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to get MSR 0x%" PRIx32,
                     (uint32_t)e->index);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10..0: must be zero
     * b  11   : valid
     * c  n-1.12: actual mask bits
     * d  51..n: reserved must be zero
     * e  63.52: reserved must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52. We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
     */
    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_SMI_COUNT:
            env->msr_smi_count = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
            env->msr_hv_reenlightenment_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
            env->msr_hv_tsc_emulation_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
            env->msr_hv_tsc_emulation_status = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        case MSR_IA32_SPEC_CTRL:
            env->spec_ctrl = msrs[i].data;
            break;
        case MSR_VIRT_SSBD:
            env->virt_ssbd = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CTL:
            env->msr_rtit_ctrl = msrs[i].data;
            break;
        case MSR_IA32_RTIT_STATUS:
            env->msr_rtit_status = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_BASE:
            env->msr_rtit_output_base = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_MASK:
            env->msr_rtit_output_mask = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CR3_MATCH:
            env->msr_rtit_cr3_match = msrs[i].data;
            break;
        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
            break;
        }
    }

    return 0;
}
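/*
 * Exchange the VCPU's multiprocessing state (runnable, halted, INIT
 * received, ...) with KVM. With an in-kernel irqchip, cs->halted is
 * derived from the value read back.
 */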
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}
static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}
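/*
 * Synchronize pending/injected exception, interrupt, NMI, SMI and SIPI
 * state via KVM_SET/GET_VCPU_EVENTS. Exception payloads are only
 * transferred when the exception-payload capability was detected.
 */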
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    if (has_exception_payload) {
        events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        events.exception.pending = env->exception_pending;
        events.exception_has_payload = env->exception_has_payload;
        events.exception_payload = env->exception_payload;
    }
    events.exception.nr = env->exception_nr;
    events.exception.injected = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        /* Stop SMI delivery on old machine types to avoid a reboot
         * on an inward migration of an old VM.
         */
        if (!cpu->kvm_no_smi_migration) {
            events.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
        env->exception_pending = events.exception.pending;
        env->exception_has_payload = events.exception_has_payload;
        env->exception_payload = events.exception_payload;
    } else {
        env->exception_pending = 0;
        env->exception_has_payload = false;
    }
    env->exception_injected = events.exception.injected;
    env->exception_nr =
        (env->exception_pending || env->exception_injected) ?
        events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}
static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_nr == EXCP01_DB) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == EXCP03_INT3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        kvm_reset_exception(env);
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}
static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}
static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}
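/*
 * Nested (VMX/SVM) virtualization state is an opaque kernel blob kept in
 * env->nested_state; it is only transferred when the kernel reports a
 * positive maximum nested-state length.
 */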
static int kvm_put_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();

    if (max_nested_state_len <= 0) {
        return 0;
    }

    assert(env->nested_state->size <= max_nested_state_len);
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}
static int kvm_get_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();
    int ret;

    if (max_nested_state_len <= 0) {
        return 0;
    }

    /*
     * It is possible that migration restored a smaller size into
     * nested_state->hdr.size than what our kernel supports.
     * We preserve the migration origin nested_state->hdr.size for
     * the call to KVM_SET_NESTED_STATE but wish that our next call
     * to KVM_GET_NESTED_STATE will use the max size our kernel supports.
     */
    env->nested_state->size = max_nested_state_len;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
    if (ret < 0) {
        return ret;
    }

    if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
        env->hflags |= HF_GUEST_MASK;
    } else {
        env->hflags &= ~HF_GUEST_MASK;
    }

    return ret;
}
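/*
 * Top-level "put" entry point. Ordering matters: nested state and the
 * feature control MSR are written first because forcibly leaving nested
 * mode invalidates other VCPU state, MCE injection must precede the MSR
 * writeback, and the guest debug workarounds come last.
 */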
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    ret = kvm_put_nested_state(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_nested_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}
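/*
 * Runs on the VCPU thread right before reentering KVM_RUN: injects pending
 * NMI/SMI, and, when the PIC is in userspace, injects external interrupts
 * or requests an interrupt-window exit and mirrors the TPR into cr8.
 */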
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit. This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}
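/*
 * Handle interrupt_request bits that must be processed outside KVM_RUN:
 * MCE, INIT, SIPI, TPR accesses and, for the userspace irqchip, APIC poll
 * and wakeup conditions. The return value reflects whether the CPU should
 * stay halted.
 */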
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_nr == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}
static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}
static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
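/*
 * Bookkeeping for the four architectural hardware breakpoints; the array
 * is mirrored into the guest's DR7 by kvm_arch_update_guest_debug().
 */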
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;
static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == EXCP01_DB) {
        if (arch_info->dr6 & DR6_BS) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_nr == -1);

        /* pass to guest */
        kvm_queue_exception(env, arch_info->exception,
                            arch_info->exception == EXCP01_DB,
                            arch_info->dr6);
        env->has_error_code = 0;
    }

    return ret;
}
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}
void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}
int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    int ret;

    if (machine_kernel_irqchip_split(ms)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}
/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}
int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}
static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}
int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}
static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
                (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
}
int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                              KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                                KVM_DEV_IRQ_HOST_MSI);
}
bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                              KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                                KVM_DEV_IRQ_HOST_MSIX);
}
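/*
 * If a vIOMMU with interrupt remapping is present, translate the MSI
 * address/data pair through it before the route is handed to KVM.
 */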
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        if (!class->int_remap) {
            return 0;
        }

        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;

        ret = class->int_remap(iommu, &src, &dst, dev ?             \
                               pci_requester_id(dev) :              \
                               X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}
typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
    QLIST_HEAD_INITIALIZER(msi_route_list);
static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSIX is disabled for the device, or the
             * specific message was masked out. Skip this one.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* For the first time we do add route, add ourselves into
         * IOMMU's IEC notify list if needed. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}
int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;

    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
)