4 * Copyright (C) 2006-2008 Qumranet Technologies
5 * Copyright IBM, Corp. 2008
8 * Anthony Liguori <aliguori@us.ibm.com>
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
15 #include "qemu/osdep.h"
16 #include "qapi/error.h"
17 #include <sys/ioctl.h>
18 #include <sys/utsname.h>
20 #include <linux/kvm.h>
21 #include "standard-headers/asm-x86/kvm_para.h"
24 #include "sysemu/sysemu.h"
25 #include "sysemu/hw_accel.h"
26 #include "sysemu/kvm_int.h"
29 #include "hyperv-proto.h"
31 #include "exec/gdbstub.h"
32 #include "qemu/host-utils.h"
33 #include "qemu/config-file.h"
34 #include "qemu/error-report.h"
35 #include "hw/i386/pc.h"
36 #include "hw/i386/apic.h"
37 #include "hw/i386/apic_internal.h"
38 #include "hw/i386/apic-msidef.h"
39 #include "hw/i386/intel_iommu.h"
40 #include "hw/i386/x86-iommu.h"
42 #include "hw/pci/pci.h"
43 #include "hw/pci/msi.h"
44 #include "hw/pci/msix.h"
45 #include "migration/blocker.h"
46 #include "exec/memattrs.h"
52 #define DPRINTF(fmt, ...) \
53 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
55 #define DPRINTF(fmt, ...) \
59 #define MSR_KVM_WALL_CLOCK 0x11
60 #define MSR_KVM_SYSTEM_TIME 0x12
62 /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
63 * 255 kvm_msr_entry structs */
64 #define MSR_BUF_SIZE 4096
66 const KVMCapabilityInfo kvm_arch_required_capabilities
[] = {
67 KVM_CAP_INFO(SET_TSS_ADDR
),
68 KVM_CAP_INFO(EXT_CPUID
),
69 KVM_CAP_INFO(MP_STATE
),
73 static bool has_msr_star
;
74 static bool has_msr_hsave_pa
;
75 static bool has_msr_tsc_aux
;
76 static bool has_msr_tsc_adjust
;
77 static bool has_msr_tsc_deadline
;
78 static bool has_msr_feature_control
;
79 static bool has_msr_misc_enable
;
80 static bool has_msr_smbase
;
81 static bool has_msr_bndcfgs
;
82 static int lm_capable_kernel
;
83 static bool has_msr_hv_hypercall
;
84 static bool has_msr_hv_crash
;
85 static bool has_msr_hv_reset
;
86 static bool has_msr_hv_vpindex
;
87 static bool hv_vpindex_settable
;
88 static bool has_msr_hv_runtime
;
89 static bool has_msr_hv_synic
;
90 static bool has_msr_hv_stimer
;
91 static bool has_msr_hv_frequencies
;
92 static bool has_msr_hv_reenlightenment
;
93 static bool has_msr_xss
;
94 static bool has_msr_spec_ctrl
;
95 static bool has_msr_virt_ssbd
;
96 static bool has_msr_smi_count
;
97 static bool has_msr_arch_capabs
;
99 static uint32_t has_architectural_pmu_version
;
100 static uint32_t num_architectural_pmu_gp_counters
;
101 static uint32_t num_architectural_pmu_fixed_counters
;
103 static int has_xsave
;
105 static int has_pit_state2
;
107 static bool has_msr_mcg_ext_ctl
;
109 static struct kvm_cpuid2
*cpuid_cache
;
110 static struct kvm_msr_list
*kvm_feature_msrs
;
112 int kvm_has_pit_state2(void)
114 return has_pit_state2
;
117 bool kvm_has_smm(void)
119 return kvm_check_extension(kvm_state
, KVM_CAP_X86_SMM
);
122 bool kvm_has_adjust_clock_stable(void)
124 int ret
= kvm_check_extension(kvm_state
, KVM_CAP_ADJUST_CLOCK
);
126 return (ret
== KVM_CLOCK_TSC_STABLE
);
129 bool kvm_allows_irq0_override(void)
131 return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
134 static bool kvm_x2apic_api_set_flags(uint64_t flags
)
136 KVMState
*s
= KVM_STATE(current_machine
->accelerator
);
138 return !kvm_vm_enable_cap(s
, KVM_CAP_X2APIC_API
, 0, flags
);
141 #define MEMORIZE(fn, _result) \
143 static bool _memorized; \
152 static bool has_x2apic_api
;
154 bool kvm_has_x2apic_api(void)
156 return has_x2apic_api
;
159 bool kvm_enable_x2apic(void)
162 kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS
|
163 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK
),
167 bool kvm_hv_vpindex_settable(void)
169 return hv_vpindex_settable
;
172 static int kvm_get_tsc(CPUState
*cs
)
174 X86CPU
*cpu
= X86_CPU(cs
);
175 CPUX86State
*env
= &cpu
->env
;
177 struct kvm_msrs info
;
178 struct kvm_msr_entry entries
[1];
182 if (env
->tsc_valid
) {
186 msr_data
.info
.nmsrs
= 1;
187 msr_data
.entries
[0].index
= MSR_IA32_TSC
;
188 env
->tsc_valid
= !runstate_is_running();
190 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_MSRS
, &msr_data
);
196 env
->tsc
= msr_data
.entries
[0].data
;
200 static inline void do_kvm_synchronize_tsc(CPUState
*cpu
, run_on_cpu_data arg
)
205 void kvm_synchronize_all_tsc(void)
211 run_on_cpu(cpu
, do_kvm_synchronize_tsc
, RUN_ON_CPU_NULL
);
216 static struct kvm_cpuid2
*try_get_cpuid(KVMState
*s
, int max
)
218 struct kvm_cpuid2
*cpuid
;
221 size
= sizeof(*cpuid
) + max
* sizeof(*cpuid
->entries
);
222 cpuid
= g_malloc0(size
);
224 r
= kvm_ioctl(s
, KVM_GET_SUPPORTED_CPUID
, cpuid
);
225 if (r
== 0 && cpuid
->nent
>= max
) {
233 fprintf(stderr
, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
241 /* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
244 static struct kvm_cpuid2
*get_supported_cpuid(KVMState
*s
)
246 struct kvm_cpuid2
*cpuid
;
249 if (cpuid_cache
!= NULL
) {
252 while ((cpuid
= try_get_cpuid(s
, max
)) == NULL
) {
259 static const struct kvm_para_features
{
262 } para_features
[] = {
263 { KVM_CAP_CLOCKSOURCE
, KVM_FEATURE_CLOCKSOURCE
},
264 { KVM_CAP_NOP_IO_DELAY
, KVM_FEATURE_NOP_IO_DELAY
},
265 { KVM_CAP_PV_MMU
, KVM_FEATURE_MMU_OP
},
266 { KVM_CAP_ASYNC_PF
, KVM_FEATURE_ASYNC_PF
},
269 static int get_para_features(KVMState
*s
)
273 for (i
= 0; i
< ARRAY_SIZE(para_features
); i
++) {
274 if (kvm_check_extension(s
, para_features
[i
].cap
)) {
275 features
|= (1 << para_features
[i
].feature
);
282 static bool host_tsx_blacklisted(void)
284 int family
, model
, stepping
;\
285 char vendor
[CPUID_VENDOR_SZ
+ 1];
287 host_vendor_fms(vendor
, &family
, &model
, &stepping
);
289 /* Check if we are running on a Haswell host known to have broken TSX */
290 return !strcmp(vendor
, CPUID_VENDOR_INTEL
) &&
292 ((model
== 63 && stepping
< 4) ||
293 model
== 60 || model
== 69 || model
== 70);
296 /* Returns the value for a specific register on the cpuid entry
298 static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2
*entry
, int reg
)
318 /* Find matching entry for function/index on kvm_cpuid2 struct
320 static struct kvm_cpuid_entry2
*cpuid_find_entry(struct kvm_cpuid2
*cpuid
,
325 for (i
= 0; i
< cpuid
->nent
; ++i
) {
326 if (cpuid
->entries
[i
].function
== function
&&
327 cpuid
->entries
[i
].index
== index
) {
328 return &cpuid
->entries
[i
];
335 uint32_t kvm_arch_get_supported_cpuid(KVMState
*s
, uint32_t function
,
336 uint32_t index
, int reg
)
338 struct kvm_cpuid2
*cpuid
;
340 uint32_t cpuid_1_edx
;
343 cpuid
= get_supported_cpuid(s
);
345 struct kvm_cpuid_entry2
*entry
= cpuid_find_entry(cpuid
, function
, index
);
348 ret
= cpuid_entry_get_reg(entry
, reg
);
351 /* Fixups for the data returned by KVM, below */
353 if (function
== 1 && reg
== R_EDX
) {
354 /* KVM before 2.6.30 misreports the following features */
355 ret
|= CPUID_MTRR
| CPUID_PAT
| CPUID_MCE
| CPUID_MCA
;
356 } else if (function
== 1 && reg
== R_ECX
) {
357 /* We can set the hypervisor flag, even if KVM does not return it on
358 * GET_SUPPORTED_CPUID
360 ret
|= CPUID_EXT_HYPERVISOR
;
361 /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
362 * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
363 * and the irqchip is in the kernel.
365 if (kvm_irqchip_in_kernel() &&
366 kvm_check_extension(s
, KVM_CAP_TSC_DEADLINE_TIMER
)) {
367 ret
|= CPUID_EXT_TSC_DEADLINE_TIMER
;
370 /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
371 * without the in-kernel irqchip
373 if (!kvm_irqchip_in_kernel()) {
374 ret
&= ~CPUID_EXT_X2APIC
;
378 int disable_exits
= kvm_check_extension(s
,
379 KVM_CAP_X86_DISABLE_EXITS
);
381 if (disable_exits
& KVM_X86_DISABLE_EXITS_MWAIT
) {
382 ret
|= CPUID_EXT_MONITOR
;
385 } else if (function
== 6 && reg
== R_EAX
) {
386 ret
|= CPUID_6_EAX_ARAT
; /* safe to allow because of emulated APIC */
387 } else if (function
== 7 && index
== 0 && reg
== R_EBX
) {
388 if (host_tsx_blacklisted()) {
389 ret
&= ~(CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_HLE
);
391 } else if (function
== 7 && index
== 0 && reg
== R_EDX
) {
393 * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
394 * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
395 * returned by KVM_GET_MSR_INDEX_LIST.
397 if (!has_msr_arch_capabs
) {
398 ret
&= ~CPUID_7_0_EDX_ARCH_CAPABILITIES
;
400 } else if (function
== 0x80000001 && reg
== R_ECX
) {
402 * It's safe to enable TOPOEXT even if it's not returned by
403 * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
404 * us to keep CPU models including TOPOEXT runnable on older kernels.
406 ret
|= CPUID_EXT3_TOPOEXT
;
407 } else if (function
== 0x80000001 && reg
== R_EDX
) {
408 /* On Intel, kvm returns cpuid according to the Intel spec,
409 * so add missing bits according to the AMD spec:
411 cpuid_1_edx
= kvm_arch_get_supported_cpuid(s
, 1, 0, R_EDX
);
412 ret
|= cpuid_1_edx
& CPUID_EXT2_AMD_ALIASES
;
413 } else if (function
== KVM_CPUID_FEATURES
&& reg
== R_EAX
) {
414 /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
415 * be enabled without the in-kernel irqchip
417 if (!kvm_irqchip_in_kernel()) {
418 ret
&= ~(1U << KVM_FEATURE_PV_UNHALT
);
420 } else if (function
== KVM_CPUID_FEATURES
&& reg
== R_EDX
) {
421 ret
|= 1U << KVM_HINTS_REALTIME
;
425 /* fallback for older kernels */
426 if ((function
== KVM_CPUID_FEATURES
) && !found
) {
427 ret
= get_para_features(s
);
433 uint32_t kvm_arch_get_supported_msr_feature(KVMState
*s
, uint32_t index
)
436 struct kvm_msrs info
;
437 struct kvm_msr_entry entries
[1];
441 if (kvm_feature_msrs
== NULL
) { /* Host doesn't support feature MSRs */
445 /* Check if requested MSR is supported feature MSR */
447 for (i
= 0; i
< kvm_feature_msrs
->nmsrs
; i
++)
448 if (kvm_feature_msrs
->indices
[i
] == index
) {
451 if (i
== kvm_feature_msrs
->nmsrs
) {
452 return 0; /* if the feature MSR is not supported, simply return 0 */
455 msr_data
.info
.nmsrs
= 1;
456 msr_data
.entries
[0].index
= index
;
458 ret
= kvm_ioctl(s
, KVM_GET_MSRS
, &msr_data
);
460 error_report("KVM get MSR (index=0x%x) feature failed, %s",
461 index
, strerror(-ret
));
465 return msr_data
.entries
[0].data
;
469 typedef struct HWPoisonPage
{
471 QLIST_ENTRY(HWPoisonPage
) list
;
474 static QLIST_HEAD(, HWPoisonPage
) hwpoison_page_list
=
475 QLIST_HEAD_INITIALIZER(hwpoison_page_list
);
477 static void kvm_unpoison_all(void *param
)
479 HWPoisonPage
*page
, *next_page
;
481 QLIST_FOREACH_SAFE(page
, &hwpoison_page_list
, list
, next_page
) {
482 QLIST_REMOVE(page
, list
);
483 qemu_ram_remap(page
->ram_addr
, TARGET_PAGE_SIZE
);
488 static void kvm_hwpoison_page_add(ram_addr_t ram_addr
)
492 QLIST_FOREACH(page
, &hwpoison_page_list
, list
) {
493 if (page
->ram_addr
== ram_addr
) {
497 page
= g_new(HWPoisonPage
, 1);
498 page
->ram_addr
= ram_addr
;
499 QLIST_INSERT_HEAD(&hwpoison_page_list
, page
, list
);
502 static int kvm_get_mce_cap_supported(KVMState
*s
, uint64_t *mce_cap
,
507 r
= kvm_check_extension(s
, KVM_CAP_MCE
);
510 return kvm_ioctl(s
, KVM_X86_GET_MCE_CAP_SUPPORTED
, mce_cap
);
515 static void kvm_mce_inject(X86CPU
*cpu
, hwaddr paddr
, int code
)
517 CPUState
*cs
= CPU(cpu
);
518 CPUX86State
*env
= &cpu
->env
;
519 uint64_t status
= MCI_STATUS_VAL
| MCI_STATUS_UC
| MCI_STATUS_EN
|
520 MCI_STATUS_MISCV
| MCI_STATUS_ADDRV
| MCI_STATUS_S
;
521 uint64_t mcg_status
= MCG_STATUS_MCIP
;
524 if (code
== BUS_MCEERR_AR
) {
525 status
|= MCI_STATUS_AR
| 0x134;
526 mcg_status
|= MCG_STATUS_EIPV
;
529 mcg_status
|= MCG_STATUS_RIPV
;
532 flags
= cpu_x86_support_mca_broadcast(env
) ? MCE_INJECT_BROADCAST
: 0;
533 /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
534 * guest kernel back into env->mcg_ext_ctl.
536 cpu_synchronize_state(cs
);
537 if (env
->mcg_ext_ctl
& MCG_EXT_CTL_LMCE_EN
) {
538 mcg_status
|= MCG_STATUS_LMCE
;
542 cpu_x86_inject_mce(NULL
, cpu
, 9, status
, mcg_status
, paddr
,
543 (MCM_ADDR_PHYS
<< 6) | 0xc, flags
);
546 static void hardware_memory_error(void)
548 fprintf(stderr
, "Hardware memory error!\n");
552 void kvm_arch_on_sigbus_vcpu(CPUState
*c
, int code
, void *addr
)
554 X86CPU
*cpu
= X86_CPU(c
);
555 CPUX86State
*env
= &cpu
->env
;
559 /* If we get an action required MCE, it has been injected by KVM
560 * while the VM was running. An action optional MCE instead should
561 * be coming from the main thread, which qemu_init_sigbus identifies
562 * as the "early kill" thread.
564 assert(code
== BUS_MCEERR_AR
|| code
== BUS_MCEERR_AO
);
566 if ((env
->mcg_cap
& MCG_SER_P
) && addr
) {
567 ram_addr
= qemu_ram_addr_from_host(addr
);
568 if (ram_addr
!= RAM_ADDR_INVALID
&&
569 kvm_physical_memory_addr_from_host(c
->kvm_state
, addr
, &paddr
)) {
570 kvm_hwpoison_page_add(ram_addr
);
571 kvm_mce_inject(cpu
, paddr
, code
);
575 fprintf(stderr
, "Hardware memory error for memory used by "
576 "QEMU itself instead of guest system!\n");
579 if (code
== BUS_MCEERR_AR
) {
580 hardware_memory_error();
583 /* Hope we are lucky for AO MCE */
586 static int kvm_inject_mce_oldstyle(X86CPU
*cpu
)
588 CPUX86State
*env
= &cpu
->env
;
590 if (!kvm_has_vcpu_events() && env
->exception_injected
== EXCP12_MCHK
) {
591 unsigned int bank
, bank_num
= env
->mcg_cap
& 0xff;
592 struct kvm_x86_mce mce
;
594 env
->exception_injected
= -1;
597 * There must be at least one bank in use if an MCE is pending.
598 * Find it and use its values for the event injection.
600 for (bank
= 0; bank
< bank_num
; bank
++) {
601 if (env
->mce_banks
[bank
* 4 + 1] & MCI_STATUS_VAL
) {
605 assert(bank
< bank_num
);
608 mce
.status
= env
->mce_banks
[bank
* 4 + 1];
609 mce
.mcg_status
= env
->mcg_status
;
610 mce
.addr
= env
->mce_banks
[bank
* 4 + 2];
611 mce
.misc
= env
->mce_banks
[bank
* 4 + 3];
613 return kvm_vcpu_ioctl(CPU(cpu
), KVM_X86_SET_MCE
, &mce
);
618 static void cpu_update_state(void *opaque
, int running
, RunState state
)
620 CPUX86State
*env
= opaque
;
623 env
->tsc_valid
= false;
627 unsigned long kvm_arch_vcpu_id(CPUState
*cs
)
629 X86CPU
*cpu
= X86_CPU(cs
);
633 #ifndef KVM_CPUID_SIGNATURE_NEXT
634 #define KVM_CPUID_SIGNATURE_NEXT 0x40000100
637 static bool hyperv_hypercall_available(X86CPU
*cpu
)
639 return cpu
->hyperv_vapic
||
640 (cpu
->hyperv_spinlock_attempts
!= HYPERV_SPINLOCK_NEVER_RETRY
);
643 static bool hyperv_enabled(X86CPU
*cpu
)
645 CPUState
*cs
= CPU(cpu
);
646 return kvm_check_extension(cs
->kvm_state
, KVM_CAP_HYPERV
) > 0 &&
647 (hyperv_hypercall_available(cpu
) ||
649 cpu
->hyperv_relaxed_timing
||
652 cpu
->hyperv_vpindex
||
653 cpu
->hyperv_runtime
||
655 cpu
->hyperv_stimer
||
656 cpu
->hyperv_reenlightenment
||
657 cpu
->hyperv_tlbflush
||
661 static int kvm_arch_set_tsc_khz(CPUState
*cs
)
663 X86CPU
*cpu
= X86_CPU(cs
);
664 CPUX86State
*env
= &cpu
->env
;
671 r
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_TSC_CONTROL
) ?
672 kvm_vcpu_ioctl(cs
, KVM_SET_TSC_KHZ
, env
->tsc_khz
) :
675 /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
676 * TSC frequency doesn't match the one we want.
678 int cur_freq
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_GET_TSC_KHZ
) ?
679 kvm_vcpu_ioctl(cs
, KVM_GET_TSC_KHZ
) :
681 if (cur_freq
<= 0 || cur_freq
!= env
->tsc_khz
) {
682 warn_report("TSC frequency mismatch between "
683 "VM (%" PRId64
" kHz) and host (%d kHz), "
684 "and TSC scaling unavailable",
685 env
->tsc_khz
, cur_freq
);
693 static bool tsc_is_stable_and_known(CPUX86State
*env
)
698 return (env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
)
699 || env
->user_tsc_khz
;
702 static int hyperv_handle_properties(CPUState
*cs
)
704 X86CPU
*cpu
= X86_CPU(cs
);
705 CPUX86State
*env
= &cpu
->env
;
707 if (cpu
->hyperv_relaxed_timing
) {
708 env
->features
[FEAT_HYPERV_EAX
] |= HV_HYPERCALL_AVAILABLE
;
710 if (cpu
->hyperv_vapic
) {
711 env
->features
[FEAT_HYPERV_EAX
] |= HV_HYPERCALL_AVAILABLE
;
712 env
->features
[FEAT_HYPERV_EAX
] |= HV_APIC_ACCESS_AVAILABLE
;
714 if (cpu
->hyperv_time
) {
715 if (kvm_check_extension(cs
->kvm_state
, KVM_CAP_HYPERV_TIME
) <= 0) {
716 fprintf(stderr
, "Hyper-V clocksources "
717 "(requested by 'hv-time' cpu flag) "
718 "are not supported by kernel\n");
721 env
->features
[FEAT_HYPERV_EAX
] |= HV_HYPERCALL_AVAILABLE
;
722 env
->features
[FEAT_HYPERV_EAX
] |= HV_TIME_REF_COUNT_AVAILABLE
;
723 env
->features
[FEAT_HYPERV_EAX
] |= HV_REFERENCE_TSC_AVAILABLE
;
725 if (cpu
->hyperv_frequencies
) {
726 if (!has_msr_hv_frequencies
) {
727 fprintf(stderr
, "Hyper-V frequency MSRs "
728 "(requested by 'hv-frequencies' cpu flag) "
729 "are not supported by kernel\n");
732 env
->features
[FEAT_HYPERV_EAX
] |= HV_ACCESS_FREQUENCY_MSRS
;
733 env
->features
[FEAT_HYPERV_EDX
] |= HV_FREQUENCY_MSRS_AVAILABLE
;
735 if (cpu
->hyperv_crash
) {
736 if (!has_msr_hv_crash
) {
737 fprintf(stderr
, "Hyper-V crash MSRs "
738 "(requested by 'hv-crash' cpu flag) "
739 "are not supported by kernel\n");
742 env
->features
[FEAT_HYPERV_EDX
] |= HV_GUEST_CRASH_MSR_AVAILABLE
;
744 if (cpu
->hyperv_reenlightenment
) {
745 if (!has_msr_hv_reenlightenment
) {
747 "Hyper-V Reenlightenment MSRs "
748 "(requested by 'hv-reenlightenment' cpu flag) "
749 "are not supported by kernel\n");
752 env
->features
[FEAT_HYPERV_EAX
] |= HV_ACCESS_REENLIGHTENMENTS_CONTROL
;
754 env
->features
[FEAT_HYPERV_EDX
] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE
;
755 if (cpu
->hyperv_reset
) {
756 if (!has_msr_hv_reset
) {
757 fprintf(stderr
, "Hyper-V reset MSR "
758 "(requested by 'hv-reset' cpu flag) "
759 "is not supported by kernel\n");
762 env
->features
[FEAT_HYPERV_EAX
] |= HV_RESET_AVAILABLE
;
764 if (cpu
->hyperv_vpindex
) {
765 if (!has_msr_hv_vpindex
) {
766 fprintf(stderr
, "Hyper-V VP_INDEX MSR "
767 "(requested by 'hv-vpindex' cpu flag) "
768 "is not supported by kernel\n");
771 env
->features
[FEAT_HYPERV_EAX
] |= HV_VP_INDEX_AVAILABLE
;
773 if (cpu
->hyperv_runtime
) {
774 if (!has_msr_hv_runtime
) {
775 fprintf(stderr
, "Hyper-V VP_RUNTIME MSR "
776 "(requested by 'hv-runtime' cpu flag) "
777 "is not supported by kernel\n");
780 env
->features
[FEAT_HYPERV_EAX
] |= HV_VP_RUNTIME_AVAILABLE
;
782 if (cpu
->hyperv_synic
) {
783 unsigned int cap
= KVM_CAP_HYPERV_SYNIC
;
784 if (!cpu
->hyperv_synic_kvm_only
) {
785 if (!cpu
->hyperv_vpindex
) {
786 fprintf(stderr
, "Hyper-V SynIC "
787 "(requested by 'hv-synic' cpu flag) "
788 "requires Hyper-V VP_INDEX ('hv-vpindex')\n");
791 cap
= KVM_CAP_HYPERV_SYNIC2
;
794 if (!has_msr_hv_synic
|| !kvm_check_extension(cs
->kvm_state
, cap
)) {
795 fprintf(stderr
, "Hyper-V SynIC (requested by 'hv-synic' cpu flag) "
796 "is not supported by kernel\n");
800 env
->features
[FEAT_HYPERV_EAX
] |= HV_SYNIC_AVAILABLE
;
802 if (cpu
->hyperv_stimer
) {
803 if (!has_msr_hv_stimer
) {
804 fprintf(stderr
, "Hyper-V timers aren't supported by kernel\n");
807 env
->features
[FEAT_HYPERV_EAX
] |= HV_SYNTIMERS_AVAILABLE
;
809 if (cpu
->hyperv_relaxed_timing
) {
810 env
->features
[FEAT_HV_RECOMM_EAX
] |= HV_RELAXED_TIMING_RECOMMENDED
;
812 if (cpu
->hyperv_vapic
) {
813 env
->features
[FEAT_HV_RECOMM_EAX
] |= HV_APIC_ACCESS_RECOMMENDED
;
815 if (cpu
->hyperv_tlbflush
) {
816 if (kvm_check_extension(cs
->kvm_state
,
817 KVM_CAP_HYPERV_TLBFLUSH
) <= 0) {
818 fprintf(stderr
, "Hyper-V TLB flush support "
819 "(requested by 'hv-tlbflush' cpu flag) "
820 " is not supported by kernel\n");
823 env
->features
[FEAT_HV_RECOMM_EAX
] |= HV_REMOTE_TLB_FLUSH_RECOMMENDED
;
824 env
->features
[FEAT_HV_RECOMM_EAX
] |= HV_EX_PROCESSOR_MASKS_RECOMMENDED
;
826 if (cpu
->hyperv_ipi
) {
827 if (kvm_check_extension(cs
->kvm_state
,
828 KVM_CAP_HYPERV_SEND_IPI
) <= 0) {
829 fprintf(stderr
, "Hyper-V IPI send support "
830 "(requested by 'hv-ipi' cpu flag) "
831 " is not supported by kernel\n");
834 env
->features
[FEAT_HV_RECOMM_EAX
] |= HV_CLUSTER_IPI_RECOMMENDED
;
835 env
->features
[FEAT_HV_RECOMM_EAX
] |= HV_EX_PROCESSOR_MASKS_RECOMMENDED
;
837 if (cpu
->hyperv_evmcs
) {
838 uint16_t evmcs_version
;
840 if (kvm_vcpu_enable_cap(cs
, KVM_CAP_HYPERV_ENLIGHTENED_VMCS
, 0,
841 (uintptr_t)&evmcs_version
)) {
842 fprintf(stderr
, "Hyper-V Enlightened VMCS "
843 "(requested by 'hv-evmcs' cpu flag) "
844 "is not supported by kernel\n");
847 env
->features
[FEAT_HV_RECOMM_EAX
] |= HV_ENLIGHTENED_VMCS_RECOMMENDED
;
848 env
->features
[FEAT_HV_NESTED_EAX
] = evmcs_version
;
854 static int hyperv_init_vcpu(X86CPU
*cpu
)
856 CPUState
*cs
= CPU(cpu
);
859 if (cpu
->hyperv_vpindex
&& !hv_vpindex_settable
) {
861 * the kernel doesn't support setting vp_index; assert that its value
865 struct kvm_msrs info
;
866 struct kvm_msr_entry entries
[1];
869 .entries
[0].index
= HV_X64_MSR_VP_INDEX
,
872 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_MSRS
, &msr_data
);
878 if (msr_data
.entries
[0].data
!= hyperv_vp_index(CPU(cpu
))) {
879 error_report("kernel's vp_index != QEMU's vp_index");
884 if (cpu
->hyperv_synic
) {
885 uint32_t synic_cap
= cpu
->hyperv_synic_kvm_only
?
886 KVM_CAP_HYPERV_SYNIC
: KVM_CAP_HYPERV_SYNIC2
;
887 ret
= kvm_vcpu_enable_cap(cs
, synic_cap
, 0);
889 error_report("failed to turn on HyperV SynIC in KVM: %s",
894 if (!cpu
->hyperv_synic_kvm_only
) {
895 ret
= hyperv_x86_synic_add(cpu
);
897 error_report("failed to create HyperV SynIC: %s",
907 static Error
*invtsc_mig_blocker
;
908 static Error
*vmx_mig_blocker
;
910 #define KVM_MAX_CPUID_ENTRIES 100
912 int kvm_arch_init_vcpu(CPUState
*cs
)
915 struct kvm_cpuid2 cpuid
;
916 struct kvm_cpuid_entry2 entries
[KVM_MAX_CPUID_ENTRIES
];
919 * The kernel defines these structs with padding fields so there
920 * should be no extra padding in our cpuid_data struct.
922 QEMU_BUILD_BUG_ON(sizeof(cpuid_data
) !=
923 sizeof(struct kvm_cpuid2
) +
924 sizeof(struct kvm_cpuid_entry2
) * KVM_MAX_CPUID_ENTRIES
);
926 X86CPU
*cpu
= X86_CPU(cs
);
927 CPUX86State
*env
= &cpu
->env
;
928 uint32_t limit
, i
, j
, cpuid_i
;
930 struct kvm_cpuid_entry2
*c
;
931 uint32_t signature
[3];
932 int kvm_base
= KVM_CPUID_SIGNATURE
;
934 Error
*local_err
= NULL
;
936 memset(&cpuid_data
, 0, sizeof(cpuid_data
));
940 r
= kvm_arch_set_tsc_khz(cs
);
945 /* vcpu's TSC frequency is either specified by user, or following
946 * the value used by KVM if the former is not present. In the
947 * latter case, we query it from KVM and record in env->tsc_khz,
948 * so that vcpu's TSC frequency can be migrated later via this field.
951 r
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_GET_TSC_KHZ
) ?
952 kvm_vcpu_ioctl(cs
, KVM_GET_TSC_KHZ
) :
959 /* Paravirtualization CPUIDs */
960 if (hyperv_enabled(cpu
)) {
961 c
= &cpuid_data
.entries
[cpuid_i
++];
962 c
->function
= HV_CPUID_VENDOR_AND_MAX_FUNCTIONS
;
963 if (!cpu
->hyperv_vendor_id
) {
964 memcpy(signature
, "Microsoft Hv", 12);
966 size_t len
= strlen(cpu
->hyperv_vendor_id
);
969 error_report("hv-vendor-id truncated to 12 characters");
972 memset(signature
, 0, 12);
973 memcpy(signature
, cpu
->hyperv_vendor_id
, len
);
975 c
->eax
= cpu
->hyperv_evmcs
?
976 HV_CPUID_NESTED_FEATURES
: HV_CPUID_IMPLEMENT_LIMITS
;
977 c
->ebx
= signature
[0];
978 c
->ecx
= signature
[1];
979 c
->edx
= signature
[2];
981 c
= &cpuid_data
.entries
[cpuid_i
++];
982 c
->function
= HV_CPUID_INTERFACE
;
983 memcpy(signature
, "Hv#1\0\0\0\0\0\0\0\0", 12);
984 c
->eax
= signature
[0];
989 c
= &cpuid_data
.entries
[cpuid_i
++];
990 c
->function
= HV_CPUID_VERSION
;
994 c
= &cpuid_data
.entries
[cpuid_i
++];
995 c
->function
= HV_CPUID_FEATURES
;
996 r
= hyperv_handle_properties(cs
);
1000 c
->eax
= env
->features
[FEAT_HYPERV_EAX
];
1001 c
->ebx
= env
->features
[FEAT_HYPERV_EBX
];
1002 c
->edx
= env
->features
[FEAT_HYPERV_EDX
];
1004 c
= &cpuid_data
.entries
[cpuid_i
++];
1005 c
->function
= HV_CPUID_ENLIGHTMENT_INFO
;
1007 c
->eax
= env
->features
[FEAT_HV_RECOMM_EAX
];
1008 c
->ebx
= cpu
->hyperv_spinlock_attempts
;
1010 c
= &cpuid_data
.entries
[cpuid_i
++];
1011 c
->function
= HV_CPUID_IMPLEMENT_LIMITS
;
1013 c
->eax
= cpu
->hv_max_vps
;
1016 kvm_base
= KVM_CPUID_SIGNATURE_NEXT
;
1017 has_msr_hv_hypercall
= true;
1019 if (cpu
->hyperv_evmcs
) {
1022 /* Create zeroed 0x40000006..0x40000009 leaves */
1023 for (function
= HV_CPUID_IMPLEMENT_LIMITS
+ 1;
1024 function
< HV_CPUID_NESTED_FEATURES
; function
++) {
1025 c
= &cpuid_data
.entries
[cpuid_i
++];
1026 c
->function
= function
;
1029 c
= &cpuid_data
.entries
[cpuid_i
++];
1030 c
->function
= HV_CPUID_NESTED_FEATURES
;
1031 c
->eax
= env
->features
[FEAT_HV_NESTED_EAX
];
1035 if (cpu
->expose_kvm
) {
1036 memcpy(signature
, "KVMKVMKVM\0\0\0", 12);
1037 c
= &cpuid_data
.entries
[cpuid_i
++];
1038 c
->function
= KVM_CPUID_SIGNATURE
| kvm_base
;
1039 c
->eax
= KVM_CPUID_FEATURES
| kvm_base
;
1040 c
->ebx
= signature
[0];
1041 c
->ecx
= signature
[1];
1042 c
->edx
= signature
[2];
1044 c
= &cpuid_data
.entries
[cpuid_i
++];
1045 c
->function
= KVM_CPUID_FEATURES
| kvm_base
;
1046 c
->eax
= env
->features
[FEAT_KVM
];
1047 c
->edx
= env
->features
[FEAT_KVM_HINTS
];
1050 cpu_x86_cpuid(env
, 0, 0, &limit
, &unused
, &unused
, &unused
);
1052 for (i
= 0; i
<= limit
; i
++) {
1053 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1054 fprintf(stderr
, "unsupported level value: 0x%x\n", limit
);
1057 c
= &cpuid_data
.entries
[cpuid_i
++];
1061 /* Keep reading function 2 till all the input is received */
1065 c
->flags
= KVM_CPUID_FLAG_STATEFUL_FUNC
|
1066 KVM_CPUID_FLAG_STATE_READ_NEXT
;
1067 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1068 times
= c
->eax
& 0xff;
1070 for (j
= 1; j
< times
; ++j
) {
1071 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1072 fprintf(stderr
, "cpuid_data is full, no space for "
1073 "cpuid(eax:2):eax & 0xf = 0x%x\n", times
);
1076 c
= &cpuid_data
.entries
[cpuid_i
++];
1078 c
->flags
= KVM_CPUID_FLAG_STATEFUL_FUNC
;
1079 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1086 for (j
= 0; ; j
++) {
1087 if (i
== 0xd && j
== 64) {
1091 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1093 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1095 if (i
== 4 && c
->eax
== 0) {
1098 if (i
== 0xb && !(c
->ecx
& 0xff00)) {
1101 if (i
== 0xd && c
->eax
== 0) {
1104 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1105 fprintf(stderr
, "cpuid_data is full, no space for "
1106 "cpuid(eax:0x%x,ecx:0x%x)\n", i
, j
);
1109 c
= &cpuid_data
.entries
[cpuid_i
++];
1117 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1118 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1121 for (j
= 1; j
<= times
; ++j
) {
1122 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1123 fprintf(stderr
, "cpuid_data is full, no space for "
1124 "cpuid(eax:0x14,ecx:0x%x)\n", j
);
1127 c
= &cpuid_data
.entries
[cpuid_i
++];
1130 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1131 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1138 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1143 if (limit
>= 0x0a) {
1146 cpu_x86_cpuid(env
, 0x0a, 0, &eax
, &unused
, &unused
, &edx
);
1148 has_architectural_pmu_version
= eax
& 0xff;
1149 if (has_architectural_pmu_version
> 0) {
1150 num_architectural_pmu_gp_counters
= (eax
& 0xff00) >> 8;
1152 /* Shouldn't be more than 32, since that's the number of bits
1153 * available in EBX to tell us _which_ counters are available.
1156 if (num_architectural_pmu_gp_counters
> MAX_GP_COUNTERS
) {
1157 num_architectural_pmu_gp_counters
= MAX_GP_COUNTERS
;
1160 if (has_architectural_pmu_version
> 1) {
1161 num_architectural_pmu_fixed_counters
= edx
& 0x1f;
1163 if (num_architectural_pmu_fixed_counters
> MAX_FIXED_COUNTERS
) {
1164 num_architectural_pmu_fixed_counters
= MAX_FIXED_COUNTERS
;
1170 cpu_x86_cpuid(env
, 0x80000000, 0, &limit
, &unused
, &unused
, &unused
);
1172 for (i
= 0x80000000; i
<= limit
; i
++) {
1173 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1174 fprintf(stderr
, "unsupported xlevel value: 0x%x\n", limit
);
1177 c
= &cpuid_data
.entries
[cpuid_i
++];
1181 /* Query for all AMD cache information leaves */
1182 for (j
= 0; ; j
++) {
1184 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1186 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1191 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1192 fprintf(stderr
, "cpuid_data is full, no space for "
1193 "cpuid(eax:0x%x,ecx:0x%x)\n", i
, j
);
1196 c
= &cpuid_data
.entries
[cpuid_i
++];
1202 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1207 /* Call Centaur's CPUID instructions they are supported. */
1208 if (env
->cpuid_xlevel2
> 0) {
1209 cpu_x86_cpuid(env
, 0xC0000000, 0, &limit
, &unused
, &unused
, &unused
);
1211 for (i
= 0xC0000000; i
<= limit
; i
++) {
1212 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1213 fprintf(stderr
, "unsupported xlevel2 value: 0x%x\n", limit
);
1216 c
= &cpuid_data
.entries
[cpuid_i
++];
1220 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1224 cpuid_data
.cpuid
.nent
= cpuid_i
;
1226 if (((env
->cpuid_version
>> 8)&0xF) >= 6
1227 && (env
->features
[FEAT_1_EDX
] & (CPUID_MCE
| CPUID_MCA
)) ==
1228 (CPUID_MCE
| CPUID_MCA
)
1229 && kvm_check_extension(cs
->kvm_state
, KVM_CAP_MCE
) > 0) {
1230 uint64_t mcg_cap
, unsupported_caps
;
1234 ret
= kvm_get_mce_cap_supported(cs
->kvm_state
, &mcg_cap
, &banks
);
1236 fprintf(stderr
, "kvm_get_mce_cap_supported: %s", strerror(-ret
));
1240 if (banks
< (env
->mcg_cap
& MCG_CAP_BANKS_MASK
)) {
1241 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
1242 (int)(env
->mcg_cap
& MCG_CAP_BANKS_MASK
), banks
);
1246 unsupported_caps
= env
->mcg_cap
& ~(mcg_cap
| MCG_CAP_BANKS_MASK
);
1247 if (unsupported_caps
) {
1248 if (unsupported_caps
& MCG_LMCE_P
) {
1249 error_report("kvm: LMCE not supported");
1252 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64
,
1256 env
->mcg_cap
&= mcg_cap
| MCG_CAP_BANKS_MASK
;
1257 ret
= kvm_vcpu_ioctl(cs
, KVM_X86_SETUP_MCE
, &env
->mcg_cap
);
1259 fprintf(stderr
, "KVM_X86_SETUP_MCE: %s", strerror(-ret
));
1264 qemu_add_vm_change_state_handler(cpu_update_state
, env
);
1266 c
= cpuid_find_entry(&cpuid_data
.cpuid
, 1, 0);
1268 has_msr_feature_control
= !!(c
->ecx
& CPUID_EXT_VMX
) ||
1269 !!(c
->ecx
& CPUID_EXT_SMX
);
1272 if ((env
->features
[FEAT_1_ECX
] & CPUID_EXT_VMX
) && !vmx_mig_blocker
) {
1273 error_setg(&vmx_mig_blocker
,
1274 "Nested VMX virtualization does not support live migration yet");
1275 r
= migrate_add_blocker(vmx_mig_blocker
, &local_err
);
1277 error_report_err(local_err
);
1278 error_free(vmx_mig_blocker
);
1283 if (env
->mcg_cap
& MCG_LMCE_P
) {
1284 has_msr_mcg_ext_ctl
= has_msr_feature_control
= true;
1287 if (!env
->user_tsc_khz
) {
1288 if ((env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
) &&
1289 invtsc_mig_blocker
== NULL
) {
1290 error_setg(&invtsc_mig_blocker
,
1291 "State blocked by non-migratable CPU device"
1293 r
= migrate_add_blocker(invtsc_mig_blocker
, &local_err
);
1295 error_report_err(local_err
);
1296 error_free(invtsc_mig_blocker
);
1302 if (cpu
->vmware_cpuid_freq
1303 /* Guests depend on 0x40000000 to detect this feature, so only expose
1304 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
1306 && kvm_base
== KVM_CPUID_SIGNATURE
1307 /* TSC clock must be stable and known for this feature. */
1308 && tsc_is_stable_and_known(env
)) {
1310 c
= &cpuid_data
.entries
[cpuid_i
++];
1311 c
->function
= KVM_CPUID_SIGNATURE
| 0x10;
1312 c
->eax
= env
->tsc_khz
;
1313 /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
1314 * APIC_BUS_CYCLE_NS */
1316 c
->ecx
= c
->edx
= 0;
1318 c
= cpuid_find_entry(&cpuid_data
.cpuid
, kvm_base
, 0);
1319 c
->eax
= MAX(c
->eax
, KVM_CPUID_SIGNATURE
| 0x10);
1322 cpuid_data
.cpuid
.nent
= cpuid_i
;
1324 cpuid_data
.cpuid
.padding
= 0;
1325 r
= kvm_vcpu_ioctl(cs
, KVM_SET_CPUID2
, &cpuid_data
);
1331 env
->xsave_buf
= qemu_memalign(4096, sizeof(struct kvm_xsave
));
1333 cpu
->kvm_msr_buf
= g_malloc0(MSR_BUF_SIZE
);
1335 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_RDTSCP
)) {
1336 has_msr_tsc_aux
= false;
1339 r
= hyperv_init_vcpu(cpu
);
1347 migrate_del_blocker(invtsc_mig_blocker
);
1351 void kvm_arch_reset_vcpu(X86CPU
*cpu
)
1353 CPUX86State
*env
= &cpu
->env
;
1356 if (kvm_irqchip_in_kernel()) {
1357 env
->mp_state
= cpu_is_bsp(cpu
) ? KVM_MP_STATE_RUNNABLE
:
1358 KVM_MP_STATE_UNINITIALIZED
;
1360 env
->mp_state
= KVM_MP_STATE_RUNNABLE
;
1363 if (cpu
->hyperv_synic
) {
1365 for (i
= 0; i
< ARRAY_SIZE(env
->msr_hv_synic_sint
); i
++) {
1366 env
->msr_hv_synic_sint
[i
] = HV_SINT_MASKED
;
1369 hyperv_x86_synic_reset(cpu
);
1373 void kvm_arch_do_init_vcpu(X86CPU
*cpu
)
1375 CPUX86State
*env
= &cpu
->env
;
1377 /* APs get directly into wait-for-SIPI state. */
1378 if (env
->mp_state
== KVM_MP_STATE_UNINITIALIZED
) {
1379 env
->mp_state
= KVM_MP_STATE_INIT_RECEIVED
;
1383 static int kvm_get_supported_feature_msrs(KVMState
*s
)
1387 if (kvm_feature_msrs
!= NULL
) {
1391 if (!kvm_check_extension(s
, KVM_CAP_GET_MSR_FEATURES
)) {
1395 struct kvm_msr_list msr_list
;
1398 ret
= kvm_ioctl(s
, KVM_GET_MSR_FEATURE_INDEX_LIST
, &msr_list
);
1399 if (ret
< 0 && ret
!= -E2BIG
) {
1400 error_report("Fetch KVM feature MSR list failed: %s",
1405 assert(msr_list
.nmsrs
> 0);
1406 kvm_feature_msrs
= (struct kvm_msr_list
*) \
1407 g_malloc0(sizeof(msr_list
) +
1408 msr_list
.nmsrs
* sizeof(msr_list
.indices
[0]));
1410 kvm_feature_msrs
->nmsrs
= msr_list
.nmsrs
;
1411 ret
= kvm_ioctl(s
, KVM_GET_MSR_FEATURE_INDEX_LIST
, kvm_feature_msrs
);
1414 error_report("Fetch KVM feature MSR list failed: %s",
1416 g_free(kvm_feature_msrs
);
1417 kvm_feature_msrs
= NULL
;
1424 static int kvm_get_supported_msrs(KVMState
*s
)
1426 static int kvm_supported_msrs
;
1430 if (kvm_supported_msrs
== 0) {
1431 struct kvm_msr_list msr_list
, *kvm_msr_list
;
1433 kvm_supported_msrs
= -1;
1435 /* Obtain MSR list from KVM. These are the MSRs that we must
1438 ret
= kvm_ioctl(s
, KVM_GET_MSR_INDEX_LIST
, &msr_list
);
1439 if (ret
< 0 && ret
!= -E2BIG
) {
1442 /* Old kernel modules had a bug and could write beyond the provided
1443 memory. Allocate at least a safe amount of 1K. */
1444 kvm_msr_list
= g_malloc0(MAX(1024, sizeof(msr_list
) +
1446 sizeof(msr_list
.indices
[0])));
1448 kvm_msr_list
->nmsrs
= msr_list
.nmsrs
;
1449 ret
= kvm_ioctl(s
, KVM_GET_MSR_INDEX_LIST
, kvm_msr_list
);
1453 for (i
= 0; i
< kvm_msr_list
->nmsrs
; i
++) {
1454 switch (kvm_msr_list
->indices
[i
]) {
1456 has_msr_star
= true;
1458 case MSR_VM_HSAVE_PA
:
1459 has_msr_hsave_pa
= true;
1462 has_msr_tsc_aux
= true;
1464 case MSR_TSC_ADJUST
:
1465 has_msr_tsc_adjust
= true;
1467 case MSR_IA32_TSCDEADLINE
:
1468 has_msr_tsc_deadline
= true;
1470 case MSR_IA32_SMBASE
:
1471 has_msr_smbase
= true;
1474 has_msr_smi_count
= true;
1476 case MSR_IA32_MISC_ENABLE
:
1477 has_msr_misc_enable
= true;
1479 case MSR_IA32_BNDCFGS
:
1480 has_msr_bndcfgs
= true;
1485 case HV_X64_MSR_CRASH_CTL
:
1486 has_msr_hv_crash
= true;
1488 case HV_X64_MSR_RESET
:
1489 has_msr_hv_reset
= true;
1491 case HV_X64_MSR_VP_INDEX
:
1492 has_msr_hv_vpindex
= true;
1494 case HV_X64_MSR_VP_RUNTIME
:
1495 has_msr_hv_runtime
= true;
1497 case HV_X64_MSR_SCONTROL
:
1498 has_msr_hv_synic
= true;
1500 case HV_X64_MSR_STIMER0_CONFIG
:
1501 has_msr_hv_stimer
= true;
1503 case HV_X64_MSR_TSC_FREQUENCY
:
1504 has_msr_hv_frequencies
= true;
1506 case HV_X64_MSR_REENLIGHTENMENT_CONTROL
:
1507 has_msr_hv_reenlightenment
= true;
1509 case MSR_IA32_SPEC_CTRL
:
1510 has_msr_spec_ctrl
= true;
1513 has_msr_virt_ssbd
= true;
1515 case MSR_IA32_ARCH_CAPABILITIES
:
1516 has_msr_arch_capabs
= true;
1522 g_free(kvm_msr_list
);
1528 static Notifier smram_machine_done
;
1529 static KVMMemoryListener smram_listener
;
1530 static AddressSpace smram_address_space
;
1531 static MemoryRegion smram_as_root
;
1532 static MemoryRegion smram_as_mem
;
1534 static void register_smram_listener(Notifier
*n
, void *unused
)
1536 MemoryRegion
*smram
=
1537 (MemoryRegion
*) object_resolve_path("/machine/smram", NULL
);
1539 /* Outer container... */
1540 memory_region_init(&smram_as_root
, OBJECT(kvm_state
), "mem-container-smram", ~0ull);
1541 memory_region_set_enabled(&smram_as_root
, true);
1543 /* ... with two regions inside: normal system memory with low
1546 memory_region_init_alias(&smram_as_mem
, OBJECT(kvm_state
), "mem-smram",
1547 get_system_memory(), 0, ~0ull);
1548 memory_region_add_subregion_overlap(&smram_as_root
, 0, &smram_as_mem
, 0);
1549 memory_region_set_enabled(&smram_as_mem
, true);
1552 /* ... SMRAM with higher priority */
1553 memory_region_add_subregion_overlap(&smram_as_root
, 0, smram
, 10);
1554 memory_region_set_enabled(smram
, true);
1557 address_space_init(&smram_address_space
, &smram_as_root
, "KVM-SMRAM");
1558 kvm_memory_listener_register(kvm_state
, &smram_listener
,
1559 &smram_address_space
, 1);
1562 int kvm_arch_init(MachineState
*ms
, KVMState
*s
)
1564 uint64_t identity_base
= 0xfffbc000;
1565 uint64_t shadow_mem
;
1567 struct utsname utsname
;
1569 has_xsave
= kvm_check_extension(s
, KVM_CAP_XSAVE
);
1570 has_xcrs
= kvm_check_extension(s
, KVM_CAP_XCRS
);
1571 has_pit_state2
= kvm_check_extension(s
, KVM_CAP_PIT_STATE2
);
1573 hv_vpindex_settable
= kvm_check_extension(s
, KVM_CAP_HYPERV_VP_INDEX
);
1575 ret
= kvm_get_supported_msrs(s
);
1580 kvm_get_supported_feature_msrs(s
);
1583 lm_capable_kernel
= strcmp(utsname
.machine
, "x86_64") == 0;
1586 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
1587 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
1588 * Since these must be part of guest physical memory, we need to allocate
1589 * them, both by setting their start addresses in the kernel and by
1590 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
1592 * Older KVM versions may not support setting the identity map base. In
1593 * that case we need to stick with the default, i.e. a 256K maximum BIOS
1596 if (kvm_check_extension(s
, KVM_CAP_SET_IDENTITY_MAP_ADDR
)) {
1597 /* Allows up to 16M BIOSes. */
1598 identity_base
= 0xfeffc000;
1600 ret
= kvm_vm_ioctl(s
, KVM_SET_IDENTITY_MAP_ADDR
, &identity_base
);
1606 /* Set TSS base one page after EPT identity map. */
1607 ret
= kvm_vm_ioctl(s
, KVM_SET_TSS_ADDR
, identity_base
+ 0x1000);
1612 /* Tell fw_cfg to notify the BIOS to reserve the range. */
1613 ret
= e820_add_entry(identity_base
, 0x4000, E820_RESERVED
);
1615 fprintf(stderr
, "e820_add_entry() table is full\n");
1618 qemu_register_reset(kvm_unpoison_all
, NULL
);
1620 shadow_mem
= machine_kvm_shadow_mem(ms
);
1621 if (shadow_mem
!= -1) {
1623 ret
= kvm_vm_ioctl(s
, KVM_SET_NR_MMU_PAGES
, shadow_mem
);
1629 if (kvm_check_extension(s
, KVM_CAP_X86_SMM
) &&
1630 object_dynamic_cast(OBJECT(ms
), TYPE_PC_MACHINE
) &&
1631 pc_machine_is_smm_enabled(PC_MACHINE(ms
))) {
1632 smram_machine_done
.notify
= register_smram_listener
;
1633 qemu_add_machine_init_done_notifier(&smram_machine_done
);
1636 if (enable_cpu_pm
) {
1637 int disable_exits
= kvm_check_extension(s
, KVM_CAP_X86_DISABLE_EXITS
);
1640 /* Work around for kernel header with a typo. TODO: fix header and drop. */
1641 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
1642 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
1644 if (disable_exits
) {
1645 disable_exits
&= (KVM_X86_DISABLE_EXITS_MWAIT
|
1646 KVM_X86_DISABLE_EXITS_HLT
|
1647 KVM_X86_DISABLE_EXITS_PAUSE
);
1650 ret
= kvm_vm_enable_cap(s
, KVM_CAP_X86_DISABLE_EXITS
, 0,
1653 error_report("kvm: guest stopping CPU not supported: %s",
1661 static void set_v8086_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
1663 lhs
->selector
= rhs
->selector
;
1664 lhs
->base
= rhs
->base
;
1665 lhs
->limit
= rhs
->limit
;
1677 static void set_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
1679 unsigned flags
= rhs
->flags
;
1680 lhs
->selector
= rhs
->selector
;
1681 lhs
->base
= rhs
->base
;
1682 lhs
->limit
= rhs
->limit
;
1683 lhs
->type
= (flags
>> DESC_TYPE_SHIFT
) & 15;
1684 lhs
->present
= (flags
& DESC_P_MASK
) != 0;
1685 lhs
->dpl
= (flags
>> DESC_DPL_SHIFT
) & 3;
1686 lhs
->db
= (flags
>> DESC_B_SHIFT
) & 1;
1687 lhs
->s
= (flags
& DESC_S_MASK
) != 0;
1688 lhs
->l
= (flags
>> DESC_L_SHIFT
) & 1;
1689 lhs
->g
= (flags
& DESC_G_MASK
) != 0;
1690 lhs
->avl
= (flags
& DESC_AVL_MASK
) != 0;
1691 lhs
->unusable
= !lhs
->present
;
1695 static void get_seg(SegmentCache
*lhs
, const struct kvm_segment
*rhs
)
1697 lhs
->selector
= rhs
->selector
;
1698 lhs
->base
= rhs
->base
;
1699 lhs
->limit
= rhs
->limit
;
1700 lhs
->flags
= (rhs
->type
<< DESC_TYPE_SHIFT
) |
1701 ((rhs
->present
&& !rhs
->unusable
) * DESC_P_MASK
) |
1702 (rhs
->dpl
<< DESC_DPL_SHIFT
) |
1703 (rhs
->db
<< DESC_B_SHIFT
) |
1704 (rhs
->s
* DESC_S_MASK
) |
1705 (rhs
->l
<< DESC_L_SHIFT
) |
1706 (rhs
->g
* DESC_G_MASK
) |
1707 (rhs
->avl
* DESC_AVL_MASK
);
1710 static void kvm_getput_reg(__u64
*kvm_reg
, target_ulong
*qemu_reg
, int set
)
1713 *kvm_reg
= *qemu_reg
;
1715 *qemu_reg
= *kvm_reg
;
1719 static int kvm_getput_regs(X86CPU
*cpu
, int set
)
1721 CPUX86State
*env
= &cpu
->env
;
1722 struct kvm_regs regs
;
1726 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_REGS
, ®s
);
1732 kvm_getput_reg(®s
.rax
, &env
->regs
[R_EAX
], set
);
1733 kvm_getput_reg(®s
.rbx
, &env
->regs
[R_EBX
], set
);
1734 kvm_getput_reg(®s
.rcx
, &env
->regs
[R_ECX
], set
);
1735 kvm_getput_reg(®s
.rdx
, &env
->regs
[R_EDX
], set
);
1736 kvm_getput_reg(®s
.rsi
, &env
->regs
[R_ESI
], set
);
1737 kvm_getput_reg(®s
.rdi
, &env
->regs
[R_EDI
], set
);
1738 kvm_getput_reg(®s
.rsp
, &env
->regs
[R_ESP
], set
);
1739 kvm_getput_reg(®s
.rbp
, &env
->regs
[R_EBP
], set
);
1740 #ifdef TARGET_X86_64
1741 kvm_getput_reg(®s
.r8
, &env
->regs
[8], set
);
1742 kvm_getput_reg(®s
.r9
, &env
->regs
[9], set
);
1743 kvm_getput_reg(®s
.r10
, &env
->regs
[10], set
);
1744 kvm_getput_reg(®s
.r11
, &env
->regs
[11], set
);
1745 kvm_getput_reg(®s
.r12
, &env
->regs
[12], set
);
1746 kvm_getput_reg(®s
.r13
, &env
->regs
[13], set
);
1747 kvm_getput_reg(®s
.r14
, &env
->regs
[14], set
);
1748 kvm_getput_reg(®s
.r15
, &env
->regs
[15], set
);
1751 kvm_getput_reg(®s
.rflags
, &env
->eflags
, set
);
1752 kvm_getput_reg(®s
.rip
, &env
->eip
, set
);
1755 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_REGS
, ®s
);
1761 static int kvm_put_fpu(X86CPU
*cpu
)
1763 CPUX86State
*env
= &cpu
->env
;
1767 memset(&fpu
, 0, sizeof fpu
);
1768 fpu
.fsw
= env
->fpus
& ~(7 << 11);
1769 fpu
.fsw
|= (env
->fpstt
& 7) << 11;
1770 fpu
.fcw
= env
->fpuc
;
1771 fpu
.last_opcode
= env
->fpop
;
1772 fpu
.last_ip
= env
->fpip
;
1773 fpu
.last_dp
= env
->fpdp
;
1774 for (i
= 0; i
< 8; ++i
) {
1775 fpu
.ftwx
|= (!env
->fptags
[i
]) << i
;
1777 memcpy(fpu
.fpr
, env
->fpregs
, sizeof env
->fpregs
);
1778 for (i
= 0; i
< CPU_NB_REGS
; i
++) {
1779 stq_p(&fpu
.xmm
[i
][0], env
->xmm_regs
[i
].ZMM_Q(0));
1780 stq_p(&fpu
.xmm
[i
][8], env
->xmm_regs
[i
].ZMM_Q(1));
1782 fpu
.mxcsr
= env
->mxcsr
;
1784 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_FPU
, &fpu
);
1787 #define XSAVE_FCW_FSW 0
1788 #define XSAVE_FTW_FOP 1
1789 #define XSAVE_CWD_RIP 2
1790 #define XSAVE_CWD_RDP 4
1791 #define XSAVE_MXCSR 6
1792 #define XSAVE_ST_SPACE 8
1793 #define XSAVE_XMM_SPACE 40
1794 #define XSAVE_XSTATE_BV 128
1795 #define XSAVE_YMMH_SPACE 144
1796 #define XSAVE_BNDREGS 240
1797 #define XSAVE_BNDCSR 256
1798 #define XSAVE_OPMASK 272
1799 #define XSAVE_ZMM_Hi256 288
1800 #define XSAVE_Hi16_ZMM 416
1801 #define XSAVE_PKRU 672
1803 #define XSAVE_BYTE_OFFSET(word_offset) \
1804 ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))
1806 #define ASSERT_OFFSET(word_offset, field) \
1807 QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
1808 offsetof(X86XSaveArea, field))
1810 ASSERT_OFFSET(XSAVE_FCW_FSW
, legacy
.fcw
);
1811 ASSERT_OFFSET(XSAVE_FTW_FOP
, legacy
.ftw
);
1812 ASSERT_OFFSET(XSAVE_CWD_RIP
, legacy
.fpip
);
1813 ASSERT_OFFSET(XSAVE_CWD_RDP
, legacy
.fpdp
);
1814 ASSERT_OFFSET(XSAVE_MXCSR
, legacy
.mxcsr
);
1815 ASSERT_OFFSET(XSAVE_ST_SPACE
, legacy
.fpregs
);
1816 ASSERT_OFFSET(XSAVE_XMM_SPACE
, legacy
.xmm_regs
);
1817 ASSERT_OFFSET(XSAVE_XSTATE_BV
, header
.xstate_bv
);
1818 ASSERT_OFFSET(XSAVE_YMMH_SPACE
, avx_state
);
1819 ASSERT_OFFSET(XSAVE_BNDREGS
, bndreg_state
);
1820 ASSERT_OFFSET(XSAVE_BNDCSR
, bndcsr_state
);
1821 ASSERT_OFFSET(XSAVE_OPMASK
, opmask_state
);
1822 ASSERT_OFFSET(XSAVE_ZMM_Hi256
, zmm_hi256_state
);
1823 ASSERT_OFFSET(XSAVE_Hi16_ZMM
, hi16_zmm_state
);
1824 ASSERT_OFFSET(XSAVE_PKRU
, pkru_state
);
1826 static int kvm_put_xsave(X86CPU
*cpu
)
1828 CPUX86State
*env
= &cpu
->env
;
1829 X86XSaveArea
*xsave
= env
->xsave_buf
;
1832 return kvm_put_fpu(cpu
);
1834 x86_cpu_xsave_all_areas(cpu
, xsave
);
1836 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_XSAVE
, xsave
);
1839 static int kvm_put_xcrs(X86CPU
*cpu
)
1841 CPUX86State
*env
= &cpu
->env
;
1842 struct kvm_xcrs xcrs
= {};
1850 xcrs
.xcrs
[0].xcr
= 0;
1851 xcrs
.xcrs
[0].value
= env
->xcr0
;
1852 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_XCRS
, &xcrs
);
1855 static int kvm_put_sregs(X86CPU
*cpu
)
1857 CPUX86State
*env
= &cpu
->env
;
1858 struct kvm_sregs sregs
;
1860 memset(sregs
.interrupt_bitmap
, 0, sizeof(sregs
.interrupt_bitmap
));
1861 if (env
->interrupt_injected
>= 0) {
1862 sregs
.interrupt_bitmap
[env
->interrupt_injected
/ 64] |=
1863 (uint64_t)1 << (env
->interrupt_injected
% 64);
1866 if ((env
->eflags
& VM_MASK
)) {
1867 set_v8086_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
1868 set_v8086_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
1869 set_v8086_seg(&sregs
.es
, &env
->segs
[R_ES
]);
1870 set_v8086_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
1871 set_v8086_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
1872 set_v8086_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
1874 set_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
1875 set_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
1876 set_seg(&sregs
.es
, &env
->segs
[R_ES
]);
1877 set_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
1878 set_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
1879 set_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
1882 set_seg(&sregs
.tr
, &env
->tr
);
1883 set_seg(&sregs
.ldt
, &env
->ldt
);
1885 sregs
.idt
.limit
= env
->idt
.limit
;
1886 sregs
.idt
.base
= env
->idt
.base
;
1887 memset(sregs
.idt
.padding
, 0, sizeof sregs
.idt
.padding
);
1888 sregs
.gdt
.limit
= env
->gdt
.limit
;
1889 sregs
.gdt
.base
= env
->gdt
.base
;
1890 memset(sregs
.gdt
.padding
, 0, sizeof sregs
.gdt
.padding
);
1892 sregs
.cr0
= env
->cr
[0];
1893 sregs
.cr2
= env
->cr
[2];
1894 sregs
.cr3
= env
->cr
[3];
1895 sregs
.cr4
= env
->cr
[4];
1897 sregs
.cr8
= cpu_get_apic_tpr(cpu
->apic_state
);
1898 sregs
.apic_base
= cpu_get_apic_base(cpu
->apic_state
);
1900 sregs
.efer
= env
->efer
;
1902 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_SREGS
, &sregs
);
1905 static void kvm_msr_buf_reset(X86CPU
*cpu
)
1907 memset(cpu
->kvm_msr_buf
, 0, MSR_BUF_SIZE
);
1910 static void kvm_msr_entry_add(X86CPU
*cpu
, uint32_t index
, uint64_t value
)
1912 struct kvm_msrs
*msrs
= cpu
->kvm_msr_buf
;
1913 void *limit
= ((void *)msrs
) + MSR_BUF_SIZE
;
1914 struct kvm_msr_entry
*entry
= &msrs
->entries
[msrs
->nmsrs
];
1916 assert((void *)(entry
+ 1) <= limit
);
1918 entry
->index
= index
;
1919 entry
->reserved
= 0;
1920 entry
->data
= value
;
1924 static int kvm_put_one_msr(X86CPU
*cpu
, int index
, uint64_t value
)
1926 kvm_msr_buf_reset(cpu
);
1927 kvm_msr_entry_add(cpu
, index
, value
);
1929 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_MSRS
, cpu
->kvm_msr_buf
);
1932 void kvm_put_apicbase(X86CPU
*cpu
, uint64_t value
)
1936 ret
= kvm_put_one_msr(cpu
, MSR_IA32_APICBASE
, value
);
1940 static int kvm_put_tscdeadline_msr(X86CPU
*cpu
)
1942 CPUX86State
*env
= &cpu
->env
;
1945 if (!has_msr_tsc_deadline
) {
1949 ret
= kvm_put_one_msr(cpu
, MSR_IA32_TSCDEADLINE
, env
->tsc_deadline
);
1959 * Provide a separate write service for the feature control MSR in order to
1960 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
1961 * before writing any other state because forcibly leaving nested mode
1962 * invalidates the VCPU state.
1964 static int kvm_put_msr_feature_control(X86CPU
*cpu
)
1968 if (!has_msr_feature_control
) {
1972 ret
= kvm_put_one_msr(cpu
, MSR_IA32_FEATURE_CONTROL
,
1973 cpu
->env
.msr_ia32_feature_control
);
1982 static int kvm_put_msrs(X86CPU
*cpu
, int level
)
1984 CPUX86State
*env
= &cpu
->env
;
1988 kvm_msr_buf_reset(cpu
);
1990 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_CS
, env
->sysenter_cs
);
1991 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_ESP
, env
->sysenter_esp
);
1992 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_EIP
, env
->sysenter_eip
);
1993 kvm_msr_entry_add(cpu
, MSR_PAT
, env
->pat
);
1995 kvm_msr_entry_add(cpu
, MSR_STAR
, env
->star
);
1997 if (has_msr_hsave_pa
) {
1998 kvm_msr_entry_add(cpu
, MSR_VM_HSAVE_PA
, env
->vm_hsave
);
2000 if (has_msr_tsc_aux
) {
2001 kvm_msr_entry_add(cpu
, MSR_TSC_AUX
, env
->tsc_aux
);
2003 if (has_msr_tsc_adjust
) {
2004 kvm_msr_entry_add(cpu
, MSR_TSC_ADJUST
, env
->tsc_adjust
);
2006 if (has_msr_misc_enable
) {
2007 kvm_msr_entry_add(cpu
, MSR_IA32_MISC_ENABLE
,
2008 env
->msr_ia32_misc_enable
);
2010 if (has_msr_smbase
) {
2011 kvm_msr_entry_add(cpu
, MSR_IA32_SMBASE
, env
->smbase
);
2013 if (has_msr_smi_count
) {
2014 kvm_msr_entry_add(cpu
, MSR_SMI_COUNT
, env
->msr_smi_count
);
2016 if (has_msr_bndcfgs
) {
2017 kvm_msr_entry_add(cpu
, MSR_IA32_BNDCFGS
, env
->msr_bndcfgs
);
2020 kvm_msr_entry_add(cpu
, MSR_IA32_XSS
, env
->xss
);
2022 if (has_msr_spec_ctrl
) {
2023 kvm_msr_entry_add(cpu
, MSR_IA32_SPEC_CTRL
, env
->spec_ctrl
);
2025 if (has_msr_virt_ssbd
) {
2026 kvm_msr_entry_add(cpu
, MSR_VIRT_SSBD
, env
->virt_ssbd
);
2029 #ifdef TARGET_X86_64
2030 if (lm_capable_kernel
) {
2031 kvm_msr_entry_add(cpu
, MSR_CSTAR
, env
->cstar
);
2032 kvm_msr_entry_add(cpu
, MSR_KERNELGSBASE
, env
->kernelgsbase
);
2033 kvm_msr_entry_add(cpu
, MSR_FMASK
, env
->fmask
);
2034 kvm_msr_entry_add(cpu
, MSR_LSTAR
, env
->lstar
);
2038 /* If host supports feature MSR, write down. */
2039 if (has_msr_arch_capabs
) {
2040 kvm_msr_entry_add(cpu
, MSR_IA32_ARCH_CAPABILITIES
,
2041 env
->features
[FEAT_ARCH_CAPABILITIES
]);
2045 * The following MSRs have side effects on the guest or are too heavy
2046 * for normal writeback. Limit them to reset or full state updates.
2048 if (level
>= KVM_PUT_RESET_STATE
) {
2049 kvm_msr_entry_add(cpu
, MSR_IA32_TSC
, env
->tsc
);
2050 kvm_msr_entry_add(cpu
, MSR_KVM_SYSTEM_TIME
, env
->system_time_msr
);
2051 kvm_msr_entry_add(cpu
, MSR_KVM_WALL_CLOCK
, env
->wall_clock_msr
);
2052 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_ASYNC_PF
)) {
2053 kvm_msr_entry_add(cpu
, MSR_KVM_ASYNC_PF_EN
, env
->async_pf_en_msr
);
2055 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_PV_EOI
)) {
2056 kvm_msr_entry_add(cpu
, MSR_KVM_PV_EOI_EN
, env
->pv_eoi_en_msr
);
2058 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_STEAL_TIME
)) {
2059 kvm_msr_entry_add(cpu
, MSR_KVM_STEAL_TIME
, env
->steal_time_msr
);
2061 if (has_architectural_pmu_version
> 0) {
2062 if (has_architectural_pmu_version
> 1) {
2063 /* Stop the counter. */
2064 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_FIXED_CTR_CTRL
, 0);
2065 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_CTRL
, 0);
2068 /* Set the counter values. */
2069 for (i
= 0; i
< num_architectural_pmu_fixed_counters
; i
++) {
2070 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_FIXED_CTR0
+ i
,
2071 env
->msr_fixed_counters
[i
]);
2073 for (i
= 0; i
< num_architectural_pmu_gp_counters
; i
++) {
2074 kvm_msr_entry_add(cpu
, MSR_P6_PERFCTR0
+ i
,
2075 env
->msr_gp_counters
[i
]);
2076 kvm_msr_entry_add(cpu
, MSR_P6_EVNTSEL0
+ i
,
2077 env
->msr_gp_evtsel
[i
]);
2079 if (has_architectural_pmu_version
> 1) {
2080 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_STATUS
,
2081 env
->msr_global_status
);
2082 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_OVF_CTRL
,
2083 env
->msr_global_ovf_ctrl
);
2085 /* Now start the PMU. */
2086 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_FIXED_CTR_CTRL
,
2087 env
->msr_fixed_ctr_ctrl
);
2088 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_CTRL
,
2089 env
->msr_global_ctrl
);
2093 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
2094 * only sync them to KVM on the first cpu
2096 if (current_cpu
== first_cpu
) {
2097 if (has_msr_hv_hypercall
) {
2098 kvm_msr_entry_add(cpu
, HV_X64_MSR_GUEST_OS_ID
,
2099 env
->msr_hv_guest_os_id
);
2100 kvm_msr_entry_add(cpu
, HV_X64_MSR_HYPERCALL
,
2101 env
->msr_hv_hypercall
);
2103 if (cpu
->hyperv_time
) {
2104 kvm_msr_entry_add(cpu
, HV_X64_MSR_REFERENCE_TSC
,
2107 if (cpu
->hyperv_reenlightenment
) {
2108 kvm_msr_entry_add(cpu
, HV_X64_MSR_REENLIGHTENMENT_CONTROL
,
2109 env
->msr_hv_reenlightenment_control
);
2110 kvm_msr_entry_add(cpu
, HV_X64_MSR_TSC_EMULATION_CONTROL
,
2111 env
->msr_hv_tsc_emulation_control
);
2112 kvm_msr_entry_add(cpu
, HV_X64_MSR_TSC_EMULATION_STATUS
,
2113 env
->msr_hv_tsc_emulation_status
);
2116 if (cpu
->hyperv_vapic
) {
2117 kvm_msr_entry_add(cpu
, HV_X64_MSR_APIC_ASSIST_PAGE
,
2120 if (has_msr_hv_crash
) {
2123 for (j
= 0; j
< HV_CRASH_PARAMS
; j
++)
2124 kvm_msr_entry_add(cpu
, HV_X64_MSR_CRASH_P0
+ j
,
2125 env
->msr_hv_crash_params
[j
]);
2127 kvm_msr_entry_add(cpu
, HV_X64_MSR_CRASH_CTL
, HV_CRASH_CTL_NOTIFY
);
2129 if (has_msr_hv_runtime
) {
2130 kvm_msr_entry_add(cpu
, HV_X64_MSR_VP_RUNTIME
, env
->msr_hv_runtime
);
2132 if (cpu
->hyperv_vpindex
&& hv_vpindex_settable
) {
2133 kvm_msr_entry_add(cpu
, HV_X64_MSR_VP_INDEX
,
2134 hyperv_vp_index(CPU(cpu
)));
2136 if (cpu
->hyperv_synic
) {
2139 kvm_msr_entry_add(cpu
, HV_X64_MSR_SVERSION
, HV_SYNIC_VERSION
);
2141 kvm_msr_entry_add(cpu
, HV_X64_MSR_SCONTROL
,
2142 env
->msr_hv_synic_control
);
2143 kvm_msr_entry_add(cpu
, HV_X64_MSR_SIEFP
,
2144 env
->msr_hv_synic_evt_page
);
2145 kvm_msr_entry_add(cpu
, HV_X64_MSR_SIMP
,
2146 env
->msr_hv_synic_msg_page
);
2148 for (j
= 0; j
< ARRAY_SIZE(env
->msr_hv_synic_sint
); j
++) {
2149 kvm_msr_entry_add(cpu
, HV_X64_MSR_SINT0
+ j
,
2150 env
->msr_hv_synic_sint
[j
]);
2153 if (has_msr_hv_stimer
) {
2156 for (j
= 0; j
< ARRAY_SIZE(env
->msr_hv_stimer_config
); j
++) {
2157 kvm_msr_entry_add(cpu
, HV_X64_MSR_STIMER0_CONFIG
+ j
* 2,
2158 env
->msr_hv_stimer_config
[j
]);
2161 for (j
= 0; j
< ARRAY_SIZE(env
->msr_hv_stimer_count
); j
++) {
2162 kvm_msr_entry_add(cpu
, HV_X64_MSR_STIMER0_COUNT
+ j
* 2,
2163 env
->msr_hv_stimer_count
[j
]);
2166 if (env
->features
[FEAT_1_EDX
] & CPUID_MTRR
) {
2167 uint64_t phys_mask
= MAKE_64BIT_MASK(0, cpu
->phys_bits
);
2169 kvm_msr_entry_add(cpu
, MSR_MTRRdefType
, env
->mtrr_deftype
);
2170 kvm_msr_entry_add(cpu
, MSR_MTRRfix64K_00000
, env
->mtrr_fixed
[0]);
2171 kvm_msr_entry_add(cpu
, MSR_MTRRfix16K_80000
, env
->mtrr_fixed
[1]);
2172 kvm_msr_entry_add(cpu
, MSR_MTRRfix16K_A0000
, env
->mtrr_fixed
[2]);
2173 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_C0000
, env
->mtrr_fixed
[3]);
2174 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_C8000
, env
->mtrr_fixed
[4]);
2175 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_D0000
, env
->mtrr_fixed
[5]);
2176 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_D8000
, env
->mtrr_fixed
[6]);
2177 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_E0000
, env
->mtrr_fixed
[7]);
2178 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_E8000
, env
->mtrr_fixed
[8]);
2179 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_F0000
, env
->mtrr_fixed
[9]);
2180 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_F8000
, env
->mtrr_fixed
[10]);
2181 for (i
= 0; i
< MSR_MTRRcap_VCNT
; i
++) {
2182 /* The CPU GPs if we write to a bit above the physical limit of
2183 * the host CPU (and KVM emulates that)
2185 uint64_t mask
= env
->mtrr_var
[i
].mask
;
2188 kvm_msr_entry_add(cpu
, MSR_MTRRphysBase(i
),
2189 env
->mtrr_var
[i
].base
);
2190 kvm_msr_entry_add(cpu
, MSR_MTRRphysMask(i
), mask
);
2193 if (env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_INTEL_PT
) {
2194 int addr_num
= kvm_arch_get_supported_cpuid(kvm_state
,
2195 0x14, 1, R_EAX
) & 0x7;
2197 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_CTL
,
2198 env
->msr_rtit_ctrl
);
2199 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_STATUS
,
2200 env
->msr_rtit_status
);
2201 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_OUTPUT_BASE
,
2202 env
->msr_rtit_output_base
);
2203 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_OUTPUT_MASK
,
2204 env
->msr_rtit_output_mask
);
2205 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_CR3_MATCH
,
2206 env
->msr_rtit_cr3_match
);
2207 for (i
= 0; i
< addr_num
; i
++) {
2208 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_ADDR0_A
+ i
,
2209 env
->msr_rtit_addrs
[i
]);
2213 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
2214 * kvm_put_msr_feature_control. */
2219 kvm_msr_entry_add(cpu
, MSR_MCG_STATUS
, env
->mcg_status
);
2220 kvm_msr_entry_add(cpu
, MSR_MCG_CTL
, env
->mcg_ctl
);
2221 if (has_msr_mcg_ext_ctl
) {
2222 kvm_msr_entry_add(cpu
, MSR_MCG_EXT_CTL
, env
->mcg_ext_ctl
);
2224 for (i
= 0; i
< (env
->mcg_cap
& 0xff) * 4; i
++) {
2225 kvm_msr_entry_add(cpu
, MSR_MC0_CTL
+ i
, env
->mce_banks
[i
]);
2229 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_MSRS
, cpu
->kvm_msr_buf
);
    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
                     (uint32_t)e->index, (uint64_t)e->data);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    return 0;
}

static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
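    /*
     * kvm_fpu.ftwx holds the abridged FPU tag word, one bit per register
     * with 1 = valid; env->fptags uses the opposite sense (1 = empty),
     * hence the inversion below.
     */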
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->xsave_buf;
    int ret;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }
    x86_cpu_xrstor_all_areas(cpu, xsave);

    return 0;
}
static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}
static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
    x86_update_hflags(env);

    return 0;
}
static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i;
    uint64_t mtrr_top_bits;

    kvm_msr_buf_reset(cpu);
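    /*
     * Each MSR of interest is queued with a dummy value of 0;
     * KVM_GET_MSRS fills in the data field of every entry, and the
     * switch at the bottom of this function copies the results back
     * into env.
     */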
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
    kvm_msr_entry_add(cpu, MSR_PAT, 0);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, 0);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
    }
    if (has_msr_feature_control) {
        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
    }
    if (!env->tsc_valid) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
    }
#endif
    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (has_architectural_pmu_version > 0) {
        if (has_architectural_pmu_version > 1) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        }
        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (cpu->hyperv_vapic) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (cpu->hyperv_time) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (cpu->hyperv_reenlightenment) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (cpu->hyperv_synic) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
        int addr_num =
            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;

        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
        for (i = 0; i < addr_num; i++) {
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to get MSR 0x%" PRIx32,
                     (uint32_t)e->index);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10:0    must be zero
     * b  11      valid
     * c  n-1:12  actual mask bits
     * d  51:n    reserved must be zero
     * e  63:52   reserved must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52.  We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading.  It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
     */
    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }
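    /*
     * For example, with cpu->phys_bits == 40 this yields
     * mtrr_top_bits == MAKE_64BIT_MASK(40, 12) == 0x000fff0000000000,
     * i.e. the reserved bits 51..40 ('d' above) that get ORed into each
     * variable-range mask read back below.
     */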
    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_SMI_COUNT:
            env->msr_smi_count = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
            env->msr_hv_reenlightenment_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
            env->msr_hv_tsc_emulation_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
            env->msr_hv_tsc_emulation_status = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        case MSR_IA32_SPEC_CTRL:
            env->spec_ctrl = msrs[i].data;
            break;
        case MSR_VIRT_SSBD:
            env->virt_ssbd = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CTL:
            env->msr_rtit_ctrl = msrs[i].data;
            break;
        case MSR_IA32_RTIT_STATUS:
            env->msr_rtit_status = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_BASE:
            env->msr_rtit_output_base = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_MASK:
            env->msr_rtit_output_mask = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CR3_MATCH:
            env->msr_rtit_cr3_match = msrs[i].data;
            break;
        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
            break;
        }
    }

    return 0;
}
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}
static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;
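    /*
     * events.flags tells the kernel which optional blocks to consume on
     * KVM_SET_VCPU_EVENTS: KVM_VCPUEVENT_VALID_SMM covers the smi fields,
     * KVM_VCPUEVENT_VALID_NMI_PENDING covers nmi.pending, and
     * KVM_VCPUEVENT_VALID_SIPI_VECTOR covers sipi_vector.
     */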
    events.flags = 0;
    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        /* Stop SMI delivery on old machine types to avoid a reboot
         * on an inward migration of an old VM.
         */
        if (!cpu->kvm_no_smi_migration) {
            events.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}
static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}
static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}
static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
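    /*
     * DR4 and DR5 architecturally alias DR6 and DR7 (when CR4.DE is
     * clear), which is presumably why both pairs are mirrored here.
     */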
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}
static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}
static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;
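    /*
     * 0xcc is the single-byte INT3 opcode: save the original byte, then
     * patch INT3 over it.  Removal below checks that the byte is still
     * 0xcc before restoring the saved instruction.
     */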
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_injected == -1);

        /* pass to guest */
        env->exception_injected = arch_info->exception;
        env->has_error_code = 0;
    }

    return ret;
}
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
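        /*
         * DR7 encoding: bit 2n+1 globally enables breakpoint n, bits
         * 16+4n..17+4n hold its type and bits 18+4n..19+4n its length.
         * E.g. a single 4-byte write watchpoint in slot 0 gives
         * 0x0600 | 2 | (0x1 << 16) | (0x3 << 18) == 0x000d0602.
         */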
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
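    /*
     * Report a stop only when the failed instruction was executed in
     * real mode or at CPL != 3, on the assumption that a ring-3 fault
     * is something the guest OS can handle itself.
     */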
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}
void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}
int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    int ret;

    if (machine_kernel_irqchip_split(ms)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}
/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}
int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}
static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}
int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}
int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}
static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
                                     (use_host_msi ?
                                      KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
}
int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                   KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                     KVM_DEV_IRQ_HOST_MSI);
}
bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}
int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                     KVM_DEV_IRQ_HOST_MSIX);
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        if (!class->int_remap) {
            return 0;
        }

        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;
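        /*
         * route->u.msi splits the 64-bit MSI address into 32-bit halves;
         * with VTD_MSI_ADDR_HI_SHIFT == 32 the statements above rebuild
         * address = ((uint64_t)address_hi << 32) | address_lo before the
         * message is handed to the IOMMU's interrupt-remapping hook.
         */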

        ret = class->int_remap(iommu, &src, &dst, dev ?
                               pci_requester_id(dev) :
                               X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}
typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
    QLIST_HEAD_INITIALIZER(msi_route_list);
static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSIX is disabled for the device, or the
             * specific message was masked out.  Skip this one.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* For the first time we do add route, add ourselves into
         * IOMMU's IEC notify list if needed. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}
int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;

    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    /* On x86 the MSI data payload alone does not identify a GSI. */
    abort();
}