/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "exec/ioport.h"
#include <asm/hyperv.h>
#include "hw/pci/pci.h"

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};
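
/* These are hard requirements: generic KVM init checks every KVM_CAP_INFO
 * entry in this table and refuses to start if the kernel lacks one. */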

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_async_pf_en;
static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
static bool has_msr_bndcfgs;
static bool has_msr_kvm_steal_time;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;

static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    return cpuid;
}

struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
    { -1, -1 }
};

static int get_para_features(KVMState *s)
{
    int features = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    }

    g_free(cpuid);

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}

typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
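
/* Guest pages poisoned by an MCE are collected on this list; the reset
 * handler registered in kvm_arch_init() below (kvm_unpoison_all) remaps
 * them with fresh RAM on system reset. */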

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_malloc(sizeof(HWPoisonPage));
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
            !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(cpu, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    X86CPU *cpu = X86_CPU(first_cpu);

    if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        hwaddr paddr;

        /* Hope we are lucky for AO MCE */
        if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
            !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
                                                addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->env.cpuid_apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif
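/* Alternative CPUID leaf base for the KVM signature, used when the Hyper-V
 * enlightenments occupy the primary 0x40000000 leaf (see kvm_base below). */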

static bool hyperv_hypercall_available(X86CPU *cpu)
{
    return cpu->hyperv_vapic ||
           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
}

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
           (hyperv_hypercall_available(cpu) ||
            cpu->hyperv_relaxed_timing);
}

#define KVM_MAX_CPUID_ENTRIES 100

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } QEMU_PACKED cpuid_data;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int r;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    if (hyperv_enabled(cpu)) {
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
        memcpy(signature, "Microsoft Hv", 12);
        c->eax = HYPERV_CPUID_MIN;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_INTERFACE;
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_FEATURES;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        }
        if (cpu->hyperv_vapic) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
            has_msr_hv_vapic = true;
        }

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
        if (has_msr_hv_vapic) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
        c->ebx = cpu->hyperv_spinlock_attempts;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;

        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    c->function = KVM_CPUID_SIGNATURE | kvm_base;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    c->function = KVM_CPUID_FEATURES | kvm_base;
    c->eax = env->features[FEAT_KVM];

    has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);

    has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);

    has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t ver;

        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
        if ((ver & 0xff) > 0) {
            has_msr_architectural_pmu = true;
            num_architectural_pmu_counters = (ver & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_counters = MAX_GP_COUNTERS;
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks > MCE_BANKS_DEF) {
            banks = MCE_BANKS_DEF;
        }
        mcg_cap &= MCE_CAP_DEF;
        mcg_cap |= banks;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }

        env->mcg_cap = mcg_cap;
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL);
    if (r && env->tsc_khz) {
        r = kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz);
        if (r < 0) {
            fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
            return r;
        }
    }

    if (kvm_has_xsave()) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }

    return 0;
}

void kvm_arch_reset_vcpu(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}
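
/* With the in-kernel irqchip, only the bootstrap processor leaves reset in
 * the runnable state; application processors stay uninitialized until they
 * receive INIT/SIPI from the BSP. */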

static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
                    has_msr_tsc_adjust = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
                    has_msr_bndcfgs = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}
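
/* The two ioctl calls above are the usual KVM_GET_MSR_INDEX_LIST probe: the
 * first call, with nmsrs = 0, fails with -E2BIG but reports the required
 * count, and the second call retrieves the actual index list. */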

int kvm_arch_init(KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = qemu_opt_get_size(qemu_get_machine_opts(),
                                   "kvm_shadow_mem", -1);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
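
/* set_seg()/get_seg() translate between QEMU's packed SegmentCache.flags
 * encoding (mirroring the hardware descriptor layout) and the discrete
 * bit-fields of struct kvm_segment. */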

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}

#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
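
/* The offsets above index the 32-bit words of kvm_xsave.region[] and match
 * the hardware XSAVE area layout; e.g. XSAVE_MXCSR = 6 corresponds to byte
 * offset 24 within the legacy FXSAVE region. */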

static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    int i, r;

    if (!kvm_has_xsave()) {
        return kvm_put_fpu(cpu);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
    xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
    memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
    memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    memcpy(&xsave->region[XSAVE_BNDREGS], env->bnd_regs,
            sizeof env->bnd_regs);
    memcpy(&xsave->region[XSAVE_BNDCSR], &env->bndcs_regs,
            sizeof(env->bndcs_regs));
    r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
    return r;
}

static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);

    msr_data.info.nmsrs = 1;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}
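
/* Kept separate from kvm_put_msrs() on purpose: writing this MSR can arm
 * the guest's deadline timer, so kvm_arch_put_registers() defers it until
 * after the bulk MSR write and the APIC state load. */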

/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entry;
    } msr_data;

    kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
                      cpu->env.msr_ia32_feature_control);
    msr_data.info.nmsrs = 1;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}

static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0, i;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
        if (has_msr_pv_eoi_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
                              env->pv_eoi_en_msr);
        }
        if (has_msr_kvm_steal_time) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_STEAL_TIME,
                              env->steal_time_msr);
        }
        if (has_msr_architectural_pmu) {
            /* Stop the counter.  */
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL, 0);

            /* Set the counter values.  */
            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_counters; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_set(&msrs[n++], MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_STATUS,
                              env->msr_global_status);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                              env->msr_global_ovf_ctrl);

            /* Now start the PMU.  */
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL,
                              env->msr_fixed_ctr_ctrl);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
                              env->msr_global_ctrl);
        }
        if (has_msr_hv_hypercall) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID,
                              env->msr_hv_guest_os_id);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL,
                              env->msr_hv_hypercall);
        }
        if (has_msr_hv_vapic) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }

        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control. */
    }
    if (env->mcg_cap) {
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}
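
/* kvm_put_msrs() and kvm_get_msrs() share the same buffer layout: a
 * struct kvm_msrs header immediately followed by an entries[] array,
 * which is exactly what the KVM_SET_MSRS/KVM_GET_MSRS ioctls expect. */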

static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    int ret, i;
    uint16_t cwd, swd, twd;

    if (!kvm_has_xsave()) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }

    cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
    swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
    twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
    env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
    memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    memcpy(env->bnd_regs, &xsave->region[XSAVE_BNDREGS],
            sizeof env->bnd_regs);
    memcpy(&env->bndcs_regs, &xsave->region[XSAVE_BNDCSR],
            sizeof(env->bndcs_regs));
    return 0;
}

static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}

static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    msrs[n++].index = MSR_PAT;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }
    if (has_msr_tsc_adjust) {
        msrs[n++].index = MSR_TSC_ADJUST;
    }
    if (has_msr_tsc_deadline) {
        msrs[n++].index = MSR_IA32_TSCDEADLINE;
    }
    if (has_msr_misc_enable) {
        msrs[n++].index = MSR_IA32_MISC_ENABLE;
    }
    if (has_msr_feature_control) {
        msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
    }
    if (has_msr_bndcfgs) {
        msrs[n++].index = MSR_IA32_BNDCFGS;
    }

    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }
    if (has_msr_pv_eoi_en) {
        msrs[n++].index = MSR_KVM_PV_EOI_EN;
    }
    if (has_msr_kvm_steal_time) {
        msrs[n++].index = MSR_KVM_STEAL_TIME;
    }
    if (has_msr_architectural_pmu) {
        msrs[n++].index = MSR_CORE_PERF_FIXED_CTR_CTRL;
        msrs[n++].index = MSR_CORE_PERF_GLOBAL_CTRL;
        msrs[n++].index = MSR_CORE_PERF_GLOBAL_STATUS;
        msrs[n++].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
            msrs[n++].index = MSR_CORE_PERF_FIXED_CTR0 + i;
        }
        for (i = 0; i < num_architectural_pmu_counters; i++) {
            msrs[n++].index = MSR_P6_PERFCTR0 + i;
            msrs[n++].index = MSR_P6_EVNTSEL0 + i;
        }
    }

    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }

    if (has_msr_hv_hypercall) {
        msrs[n++].index = HV_X64_MSR_HYPERCALL;
        msrs[n++].index = HV_X64_MSR_GUEST_OS_ID;
    }
    if (has_msr_hv_vapic) {
        msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
    }

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ...
             MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] =
                msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        }
    }

    return 0;
}

static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}

static int kvm_put_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;

    if (apic && kvm_irqchip_in_kernel()) {
        kvm_put_apic_state(apic, &kapic);

        return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
    }
    return 0;
}

static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;
    events.exception.pad = 0;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
    events.nmi.pad = 0;

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}

static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE && has_msr_feature_control) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
        ret = kvm_put_apic(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
        if (ret < 0) {
            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                    strerror(-ret));
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Force the VCPU out of its inner loop to process any INIT requests
         * or pending TPR access reports. */
        if (cpu->interrupt_request &
            (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
            cpu->exit_request = 1;
        }

        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
    }
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}
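
/* x86 provides exactly four hardware debug address registers (DR0-DR3),
 * hence the fixed limit of four slots enforced below. */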

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(CPU(cpu), arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(CPU(cpu));
        assert(env->exception_injected == -1);

        /* pass to guest */
        env->exception_injected = arch_info->exception;
        env->has_error_code = 0;
    }

    return ret;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
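
/* DR7 encoding used above: bit (2 << (n * 2)) globally enables slot n, and
 * the R/W and LEN fields for slot n live at bits 16 + n*4 and 18 + n*4. */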

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(cpu);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        ret = kvm_handle_tpr_access(cpu);
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can be most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_irqfds_allowed = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;
}

/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}

int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}

static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}

int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                              KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                                KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                              KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                                KVM_DEV_IRQ_HOST_MSIX);
}