/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"

#include "qemu-common.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

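/*
 * Legacy kvmclock MSR numbers; newer kernels also expose the same clocks
 * at MSR_KVM_WALL_CLOCK_NEW/MSR_KVM_SYSTEM_TIME_NEW (0x4b564d00/01).
 */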
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
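/* Sanity check of the comment above: sizeof(struct kvm_msr_entry) is 16
 * bytes, so 8 + 255 * 16 = 4088 bytes, which indeed fits in 4096. */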

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_xss;
static bool has_msr_spec_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

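/*
 * MEMORIZE(fn, _result) evaluates 'fn' only once and caches the outcome
 * in '_result'.  Note the non-local control flow: on subsequent calls the
 * 'return' statement exits the function *using* the macro (e.g.
 * kvm_enable_x2apic() below), so it may only appear in functions whose
 * return type matches '_result'.
 */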
#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

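/*
 * struct kvm_msrs from the KVM ABI ends in a flexible array of
 * kvm_msr_entry, so a single-entry KVM_GET_MSRS request can be built on
 * the stack by placing one entry directly behind the header, as in the
 * anonymous struct below.  KVM_GET_MSRS returns the number of MSRs that
 * were actually read, hence the assert(ret == 1).
 */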
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

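/*
 * Probe KVM_GET_SUPPORTED_CPUID with room for 'max' entries.  KVM signals
 * a too-small buffer with -E2BIG; a return of 0 with the buffer completely
 * full is treated the same way, since the list may have been truncated.
 * Returning NULL makes get_supported_cpuid() below retry with a doubled
 * buffer size.
 */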
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}

static bool host_tsx_blacklisted(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
                kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_blacklisted()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
        found = true;
    }

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}

uint32_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    return msr_data.entries[0].data;
}


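/*
 * Guest pages that hit an uncorrected memory error are recorded on
 * hwpoison_page_list, so the (now unusable) backing pages can be remapped
 * to fresh ones by kvm_unpoison_all() when the VM is reset.
 */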
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

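    /*
     * Compose an architectural MCA status.  The low bits hold the MCA
     * error code: 0x134 is the "data load" code used for action-required
     * (SRAR) errors, 0xc0 the "memory scrub" code used for action-optional
     * (SRAO) errors, matching the MCACOD values Linux uses for injection.
     */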
    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running. An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);
            return;
        }

        fprintf(stderr, "Hardware memory error for memory used by "
                "QEMU itself instead of guest system!\n");
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error();
    }

    /* Hope we are lucky for AO MCE */
}

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_hypercall_available(X86CPU *cpu)
{
    return cpu->hyperv_vapic ||
           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
}

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
           (hyperv_hypercall_available(cpu) ||
            cpu->hyperv_time ||
            cpu->hyperv_relaxed_timing ||
            cpu->hyperv_crash ||
            cpu->hyperv_reset ||
            cpu->hyperv_vpindex ||
            cpu->hyperv_runtime ||
            cpu->hyperv_synic ||
            cpu->hyperv_stimer ||
            cpu->hyperv_reenlightenment ||
            cpu->hyperv_tlbflush ||
            cpu->hyperv_ipi);
}

static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r;

    if (!env->tsc_khz) {
        return 0;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;
    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                       -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

static int hyperv_handle_properties(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cpu->hyperv_relaxed_timing) {
        env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
    }
    if (cpu->hyperv_vapic) {
        env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
        env->features[FEAT_HYPERV_EAX] |= HV_APIC_ACCESS_AVAILABLE;
    }
    if (cpu->hyperv_time) {
        if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
            fprintf(stderr, "Hyper-V clocksources "
                    "(requested by 'hv-time' cpu flag) "
                    "are not supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
        env->features[FEAT_HYPERV_EAX] |= HV_TIME_REF_COUNT_AVAILABLE;
        env->features[FEAT_HYPERV_EAX] |= HV_REFERENCE_TSC_AVAILABLE;
    }
    if (cpu->hyperv_frequencies) {
        if (!has_msr_hv_frequencies) {
            fprintf(stderr, "Hyper-V frequency MSRs "
                    "(requested by 'hv-frequencies' cpu flag) "
                    "are not supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_FREQUENCY_MSRS;
        env->features[FEAT_HYPERV_EDX] |= HV_FREQUENCY_MSRS_AVAILABLE;
    }
    if (cpu->hyperv_crash) {
        if (!has_msr_hv_crash) {
            fprintf(stderr, "Hyper-V crash MSRs "
                    "(requested by 'hv-crash' cpu flag) "
                    "are not supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EDX] |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }
    if (cpu->hyperv_reenlightenment) {
        if (!has_msr_hv_reenlightenment) {
            fprintf(stderr,
                    "Hyper-V Reenlightenment MSRs "
                    "(requested by 'hv-reenlightenment' cpu flag) "
                    "are not supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }
    env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
    if (cpu->hyperv_reset) {
        if (!has_msr_hv_reset) {
            fprintf(stderr, "Hyper-V reset MSR "
                    "(requested by 'hv-reset' cpu flag) "
                    "is not supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EAX] |= HV_RESET_AVAILABLE;
    }
    if (cpu->hyperv_vpindex) {
        if (!has_msr_hv_vpindex) {
            fprintf(stderr, "Hyper-V VP_INDEX MSR "
                    "(requested by 'hv-vpindex' cpu flag) "
                    "is not supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EAX] |= HV_VP_INDEX_AVAILABLE;
    }
    if (cpu->hyperv_runtime) {
        if (!has_msr_hv_runtime) {
            fprintf(stderr, "Hyper-V VP_RUNTIME MSR "
                    "(requested by 'hv-runtime' cpu flag) "
                    "is not supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE;
    }
    if (cpu->hyperv_synic) {
        unsigned int cap = KVM_CAP_HYPERV_SYNIC;
        if (!cpu->hyperv_synic_kvm_only) {
            if (!cpu->hyperv_vpindex) {
                fprintf(stderr, "Hyper-V SynIC "
                        "(requested by 'hv-synic' cpu flag) "
                        "requires Hyper-V VP_INDEX ('hv-vpindex')\n");
                return -ENOSYS;
            }
            cap = KVM_CAP_HYPERV_SYNIC2;
        }

        if (!has_msr_hv_synic || !kvm_check_extension(cs->kvm_state, cap)) {
            fprintf(stderr, "Hyper-V SynIC (requested by 'hv-synic' cpu flag) "
                    "is not supported by kernel\n");
            return -ENOSYS;
        }

        env->features[FEAT_HYPERV_EAX] |= HV_SYNIC_AVAILABLE;
    }
    if (cpu->hyperv_stimer) {
        if (!has_msr_hv_stimer) {
            fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EAX] |= HV_SYNTIMERS_AVAILABLE;
    }
    return 0;
}

static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (cpu->hyperv_vpindex && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        struct {
            struct kvm_msrs info;
            struct kvm_msr_entry entries[1];
        } msr_data = {
            .info.nmsrs = 1,
            .entries[0].index = HV_X64_MSR_VP_INDEX,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
        if (ret < 0) {
            return ret;
        }
        assert(ret == 1);

        if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (cpu->hyperv_synic) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    return 0;
}

static Error *invtsc_mig_blocker;
static Error *vmx_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES 100

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } cpuid_data;
    /*
     * The kernel defines these structs with padding fields so there
     * should be no extra padding in our cpuid_data struct.
     */
    QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
                      sizeof(struct kvm_cpuid2) +
                      sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint16_t evmcs_version;
    int kvm_base = KVM_CPUID_SIGNATURE;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        goto fail;
    }

    /* vcpu's TSC frequency is either specified by user, or following
     * the value used by KVM if the former is not present. In the
     * latter case, we query it from KVM and record in env->tsc_khz,
     * so that vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    /* Paravirtualization CPUIDs */
    if (hyperv_enabled(cpu)) {
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
        if (!cpu->hyperv_vendor_id) {
            memcpy(signature, "Microsoft Hv", 12);
        } else {
            size_t len = strlen(cpu->hyperv_vendor_id);

            if (len > 12) {
                error_report("hv-vendor-id truncated to 12 characters");
                len = 12;
            }
            memset(signature, 0, 12);
            memcpy(signature, cpu->hyperv_vendor_id, len);
        }
        c->eax = cpu->hyperv_evmcs ?
            HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HV_CPUID_INTERFACE;
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HV_CPUID_FEATURES;
        r = hyperv_handle_properties(cs);
        if (r) {
            return r;
        }
        c->eax = env->features[FEAT_HYPERV_EAX];
        c->ebx = env->features[FEAT_HYPERV_EBX];
        c->edx = env->features[FEAT_HYPERV_EDX];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HV_CPUID_ENLIGHTMENT_INFO;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        }
        if (cpu->hyperv_vapic) {
            c->eax |= HV_APIC_ACCESS_RECOMMENDED;
        }
        if (cpu->hyperv_tlbflush) {
            if (kvm_check_extension(cs->kvm_state,
                                    KVM_CAP_HYPERV_TLBFLUSH) <= 0) {
                fprintf(stderr, "Hyper-V TLB flush support "
                        "(requested by 'hv-tlbflush' cpu flag) "
                        "is not supported by kernel\n");
                return -ENOSYS;
            }
            c->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
            c->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
        }
        if (cpu->hyperv_ipi) {
            if (kvm_check_extension(cs->kvm_state,
                                    KVM_CAP_HYPERV_SEND_IPI) <= 0) {
                fprintf(stderr, "Hyper-V IPI send support "
                        "(requested by 'hv-ipi' cpu flag) "
                        "is not supported by kernel\n");
                return -ENOSYS;
            }
            c->eax |= HV_CLUSTER_IPI_RECOMMENDED;
            c->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
        }
        if (cpu->hyperv_evmcs) {
            if (kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                    (uintptr_t)&evmcs_version)) {
                fprintf(stderr, "Hyper-V Enlightened VMCS "
                        "(requested by 'hv-evmcs' cpu flag) "
                        "is not supported by kernel\n");
                return -ENOSYS;
            }
            c->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
        }
        c->ebx = cpu->hyperv_spinlock_attempts;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HV_CPUID_IMPLEMENT_LIMITS;

        c->eax = cpu->hv_max_vps;
        c->ebx = 0x40;

        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;

        if (cpu->hyperv_evmcs) {
            __u32 function;

            /* Create zeroed 0x40000006..0x40000009 leaves */
            for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
                 function < HV_CPUID_NESTED_FEATURES; function++) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = function;
            }

            c = &cpuid_data.entries[cpuid_i++];
            c->function = HV_CPUID_NESTED_FEATURES;
            c->eax = evmcs_version;
        }
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
        c->edx = env->features[FEAT_KVM_HINTS];
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
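            /*
             * These leaves are indexed by ECX; stop when a subleaf reports
             * itself invalid: EAX == 0 for leaf 4, a level type of 0
             * (ECX bits 15:8) for leaf 0xb.  Leaf 0xd instead skips empty
             * subleaves and is bounded at 64 below.
             */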
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        case 0x14: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x14,ecx:0x%x)\n", j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
                        unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if ((env->features[FEAT_1_ECX] & CPUID_EXT_VMX) && !vmx_mig_blocker) {
        error_setg(&vmx_mig_blocker,
                   "Nested VMX virtualization does not support live migration yet");
        r = migrate_add_blocker(vmx_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(vmx_mig_blocker);
            return r;
        }
    }

    if (env->mcg_cap & MCG_LMCE_P) {
        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
    }

    if (!env->user_tsc_khz) {
        if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
            invtsc_mig_blocker == NULL) {
            error_setg(&invtsc_mig_blocker,
                       "State blocked by non-migratable CPU device"
                       " (invtsc flag)");
            r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
            if (local_err) {
                error_report_err(local_err);
                error_free(invtsc_mig_blocker);
                return r;
            }
        }
    }

    if (cpu->vmware_cpuid_freq
        /* Guests depend on 0x40000000 to detect this feature, so only expose
         * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
        && cpu->expose_kvm
        && kvm_base == KVM_CPUID_SIGNATURE
        /* TSC clock must be stable and known for this feature. */
        && tsc_is_stable_and_known(env)) {

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | 0x10;
        c->eax = env->tsc_khz;
        /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
         * APIC_BUS_CYCLE_NS */
        c->ebx = 1000000;
        c->ecx = c->edx = 0;

        c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
        c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        goto fail;
    }

    if (has_xsave) {
        env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }
    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    r = hyperv_init_vcpu(cpu);
    if (r) {
        goto fail;
    }

    return 0;

 fail:
    migrate_del_blocker(invtsc_mig_blocker);
    return r;
}

void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }

    if (cpu->hyperv_synic) {
        int i;
        for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
            env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
        }

        hyperv_x86_synic_reset(cpu);
    }
}

void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state. */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}

static int kvm_get_supported_feature_msrs(KVMState *s)
{
    int ret = 0;

    if (kvm_feature_msrs != NULL) {
        return 0;
    }

    if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
        return 0;
    }

    struct kvm_msr_list msr_list;

    msr_list.nmsrs = 0;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
    if (ret < 0 && ret != -E2BIG) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        return ret;
    }

    assert(msr_list.nmsrs > 0);
    kvm_feature_msrs = (struct kvm_msr_list *)
        g_malloc0(sizeof(msr_list) +
                  msr_list.nmsrs * sizeof(msr_list.indices[0]));

    kvm_feature_msrs->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);

    if (ret < 0) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        g_free(kvm_feature_msrs);
        kvm_feature_msrs = NULL;
        return ret;
    }

    return 0;
}

static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM. These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                switch (kvm_msr_list->indices[i]) {
                case MSR_STAR:
                    has_msr_star = true;
                    break;
                case MSR_VM_HSAVE_PA:
                    has_msr_hsave_pa = true;
                    break;
                case MSR_TSC_AUX:
                    has_msr_tsc_aux = true;
                    break;
                case MSR_TSC_ADJUST:
                    has_msr_tsc_adjust = true;
                    break;
                case MSR_IA32_TSCDEADLINE:
                    has_msr_tsc_deadline = true;
                    break;
                case MSR_IA32_SMBASE:
                    has_msr_smbase = true;
                    break;
                case MSR_SMI_COUNT:
                    has_msr_smi_count = true;
                    break;
                case MSR_IA32_MISC_ENABLE:
                    has_msr_misc_enable = true;
                    break;
                case MSR_IA32_BNDCFGS:
                    has_msr_bndcfgs = true;
                    break;
                case MSR_IA32_XSS:
                    has_msr_xss = true;
                    break;
                case HV_X64_MSR_CRASH_CTL:
                    has_msr_hv_crash = true;
                    break;
                case HV_X64_MSR_RESET:
                    has_msr_hv_reset = true;
                    break;
                case HV_X64_MSR_VP_INDEX:
                    has_msr_hv_vpindex = true;
                    break;
                case HV_X64_MSR_VP_RUNTIME:
                    has_msr_hv_runtime = true;
                    break;
                case HV_X64_MSR_SCONTROL:
                    has_msr_hv_synic = true;
                    break;
                case HV_X64_MSR_STIMER0_CONFIG:
                    has_msr_hv_stimer = true;
                    break;
                case HV_X64_MSR_TSC_FREQUENCY:
                    has_msr_hv_frequencies = true;
                    break;
                case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
                    has_msr_hv_reenlightenment = true;
                    break;
                case MSR_IA32_SPEC_CTRL:
                    has_msr_spec_ctrl = true;
                    break;
                case MSR_VIRT_SSBD:
                    has_msr_virt_ssbd = true;
                    break;
                case MSR_IA32_ARCH_CAPABILITIES:
                    has_msr_arch_capabs = true;
                    break;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}

static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;

static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container... */
    memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* ... with two regions inside: normal system memory with low
     * priority, and...
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    if (smram) {
        /* ... SMRAM with higher priority */
        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
        memory_region_set_enabled(smram, true);
    }

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);

    hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    kvm_get_supported_feature_msrs(s);

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = machine_kvm_shadow_mem(ms);
    if (shadow_mem != -1) {
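        /* KVM_SET_NR_MMU_PAGES expects the limit in 4KiB pages */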
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
        object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) &&
        pc_machine_is_smm_enabled(PC_MACHINE(ms))) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }

    if (enable_cpu_pm) {
        int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
        int ret;

/* Workaround for a kernel header with a typo. TODO: fix header and drop. */
#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
#endif
        if (disable_exits) {
            disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
                              KVM_X86_DISABLE_EXITS_HLT |
                              KVM_X86_DISABLE_EXITS_PAUSE);
        }

        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
                                disable_exits);
        if (ret < 0) {
            error_report("kvm: guest stopping CPU not supported: %s",
                         strerror(-ret));
        }
    }

    return 0;
}

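/*
 * In virtual-8086 mode every segment is a present, DPL-3, read/write data
 * segment (type 3), so set_v8086_seg() hardwires those attributes and only
 * the selector, base and limit come from the segment cache.
 */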
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}

#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
#define XSAVE_OPMASK      272
#define XSAVE_ZMM_Hi256   288
#define XSAVE_Hi16_ZMM    416
#define XSAVE_PKRU        672

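/*
 * The XSAVE_* offsets above are expressed in 4-byte words, matching the
 * __u32 region[] of struct kvm_xsave; XSAVE_BYTE_OFFSET() converts them to
 * byte offsets so each one can be checked against the corresponding
 * X86XSaveArea field at build time.
 */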
b503717d 1790#define XSAVE_BYTE_OFFSET(word_offset) \
f18793b0 1791 ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))
b503717d
EH
1792
1793#define ASSERT_OFFSET(word_offset, field) \
1794 QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
1795 offsetof(X86XSaveArea, field))
1796
1797ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
1798ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
1799ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
1800ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
1801ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
1802ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
1803ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
1804ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
1805ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
1806ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
1807ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
1808ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
1809ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
1810ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
1811ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
1812
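/*
 * A worked example of the checks above: XSAVE_YMMH_SPACE is word offset
 * 144, i.e. byte offset 144 * sizeof(region[0]) == 144 * 4 == 576, which
 * is where the AVX state starts in the XSAVE area (512 bytes of legacy
 * FXSAVE state followed by the 64-byte XSAVE header).
 */
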
static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->xsave_buf;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }
    x86_cpu_xsave_all_areas(cpu, xsave);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
}

static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }
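    /*
     * For example, a pending vector 67 sets bit 3 of interrupt_bitmap[1]
     * (67 / 64 == 1, 67 % 64 == 3); kvm_get_sregs() performs the inverse
     * lookup when reading the state back.
     */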

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

static void kvm_msr_buf_reset(X86CPU *cpu)
{
    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}

static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
{
    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];

    assert((void *)(entry + 1) <= limit);

    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
    msrs->nmsrs++;
}

static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
{
    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, index, value);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
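
/*
 * Typical usage of the MSR buffer: call kvm_msr_buf_reset(), queue one
 * kvm_msr_entry_add() per MSR, then issue a single KVM_SET_MSRS (or
 * KVM_GET_MSRS) ioctl for the whole batch, exactly as kvm_put_msrs()
 * and kvm_get_msrs() below do.
 */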

void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
{
    int ret;

    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
    assert(ret == 1);
}

static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int ret;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    int ret;

    if (!has_msr_feature_control) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
                          cpu->env.msr_ia32_feature_control);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    int i;
    int ret;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
        kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
    }
#endif

    /* If the host supports the feature MSR, write it down. */
    if (has_msr_arch_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
                          env->features[FEAT_ARCH_CAPABILITIES]);
    }

    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
        kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
            kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
            kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
        }
        if (has_architectural_pmu_version > 0) {
            if (has_architectural_pmu_version > 1) {
                /* Stop the counter. */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            }

            /* Set the counter values. */
            for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            if (has_architectural_pmu_version > 1) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
                                  env->msr_global_status);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                                  env->msr_global_ovf_ctrl);

                /* Now start the PMU. */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
                                  env->msr_fixed_ctr_ctrl);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                                  env->msr_global_ctrl);
            }
        }
        /*
         * Hyper-V partition-wide MSRs: to avoid clearing them on CPU
         * hot-add, only sync them to KVM on the first CPU.
         */
        if (current_cpu == first_cpu) {
            if (has_msr_hv_hypercall) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
                                  env->msr_hv_guest_os_id);
                kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
                                  env->msr_hv_hypercall);
            }
            if (cpu->hyperv_time) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
                                  env->msr_hv_tsc);
            }
            if (cpu->hyperv_reenlightenment) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
                                  env->msr_hv_reenlightenment_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
                                  env->msr_hv_tsc_emulation_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
                                  env->msr_hv_tsc_emulation_status);
            }
        }
        if (cpu->hyperv_vapic) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_CRASH_PARAMS; j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);
            }

            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
        }
        if (cpu->hyperv_vpindex && hv_vpindex_settable) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
                              hyperv_vp_index(CPU(cpu)));
        }
        if (cpu->hyperv_synic) {
            int j;

            kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);

            kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
                              env->msr_hv_synic_control);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
                              env->msr_hv_synic_evt_page);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
                              env->msr_hv_synic_msg_page);

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
                                  env->msr_hv_synic_sint[j]);
            }
        }
        if (has_msr_hv_stimer) {
            int j;

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
                                  env->msr_hv_stimer_config[j]);
            }

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
                                  env->msr_hv_stimer_count[j]);
            }
        }
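        /*
         * The Hyper-V synthetic timer MSRs are laid out in interleaved
         * CONFIG/COUNT pairs (STIMER0_CONFIG, STIMER0_COUNT,
         * STIMER1_CONFIG, ...), hence the stride of two in the index
         * arithmetic above.
         */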
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);

            kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                /*
                 * The CPU GPs if we write to a bit above the physical
                 * limit of the host CPU (and KVM emulates that).
                 */
                uint64_t mask = env->mtrr_var[i].mask;
                mask &= phys_mask;

                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
                                  env->mtrr_var[i].base);
                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
            }
        }
        if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
            int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
                                                        0x14, 1, R_EAX) & 0x7;

            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
                              env->msr_rtit_ctrl);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
                              env->msr_rtit_status);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
                              env->msr_rtit_output_base);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
                              env->msr_rtit_output_mask);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
                              env->msr_rtit_cr3_match);
            for (i = 0; i < addr_num; i++) {
                kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
                                  env->msr_rtit_addrs[i]);
            }
        }

        /*
         * Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         * kvm_put_msr_feature_control.
         */
    }
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
                     (uint32_t)e->index, (uint64_t)e->data);
    }
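
    /*
     * KVM_SET_MSRS returns the number of MSR entries it accepted, so on
     * partial failure 'ret' indexes the first entry the kernel rejected,
     * which is the one reported above.
     */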

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    return 0;
}

static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->xsave_buf;
    int ret;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }
    x86_cpu_xrstor_all_areas(cpu, xsave);

    return 0;
}

static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}

static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /*
     * There can only be one pending IRQ set in the bitmap at a time, so
     * try to find it and save its number instead (-1 for none).
     */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
    x86_update_hflags(env);

    return 0;
}

static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i;
    uint64_t mtrr_top_bits;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
    kvm_msr_entry_add(cpu, MSR_PAT, 0);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, 0);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
    }
    if (has_msr_feature_control) {
        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
    }
    if (!env->tsc_valid) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
    }
#endif
    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (has_architectural_pmu_version > 0) {
        if (has_architectural_pmu_version > 1) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        }
        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (cpu->hyperv_vapic) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (cpu->hyperv_time) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (cpu->hyperv_reenlightenment) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (cpu->hyperv_synic) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
        int addr_num =
            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;

        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
        for (i = 0; i < addr_num; i++) {
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to get MSR 0x%" PRIx32,
                     (uint32_t)e->index);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10..0 : must be zero
     * b  11    : valid bit
     * c n-1..12: actual mask bits
     * d  51..n : reserved, must be zero
     * e 63..52 : reserved, must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52. We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
     */
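    /*
     * For example, with phys_bits == 40 the valid mask bits are 39..12;
     * the fill_mtrr_mask path below computes mtrr_top_bits as
     * MAKE_64BIT_MASK(40, 12), i.e. bits 51..40 set, so OR-ing it into
     * each mask read from KVM keeps d..c consistent on any destination
     * whose physical address width is at least 40 bits.
     */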

    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_SMI_COUNT:
            env->msr_smi_count = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                msrs[i].data;
            break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
            env->msr_hv_reenlightenment_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
            env->msr_hv_tsc_emulation_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
            env->msr_hv_tsc_emulation_status = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        case MSR_IA32_SPEC_CTRL:
            env->spec_ctrl = msrs[i].data;
            break;
        case MSR_VIRT_SSBD:
            env->virt_ssbd = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CTL:
            env->msr_rtit_ctrl = msrs[i].data;
            break;
        case MSR_IA32_RTIT_STATUS:
            env->msr_rtit_status = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_BASE:
            env->msr_rtit_output_base = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_MASK:
            env->msr_rtit_output_mask = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CR3_MATCH:
            env->msr_rtit_cr3_match = msrs[i].data;
            break;
        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
            break;
        }
    }

    return 0;
}

static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}

static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;
    events.flags = 0;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /*
             * As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        /*
         * Stop SMI delivery on old machine types to avoid a reboot
         * on an incoming migration of an old VM.
         */
        if (!cpu->kvm_no_smi_migration) {
            events.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}

static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /*
         * We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /*
     * Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /*
         * If we have an interrupt but the guest is not ready to receive
         * it, request an interrupt window exit. This will cause a return
         * to userspace as soon as the guest is ready to receive
         * interrupts.
         */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /*
     * We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used.
     */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
3404
3405static struct {
3406 target_ulong addr;
3407 int len;
3408 int type;
3409} hw_breakpoint[4];
3410
3411static int nb_hw_breakpoint;
3412
3413static int find_hw_breakpoint(target_ulong addr, int len, int type)
3414{
3415 int n;
3416
b9bec74b 3417 for (n = 0; n < nb_hw_breakpoint; n++) {
e22a25c9 3418 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
b9bec74b 3419 (hw_breakpoint[n].len == len || len == -1)) {
e22a25c9 3420 return n;
b9bec74b
JK
3421 }
3422 }
e22a25c9
AL
3423 return -1;
3424}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}
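
/*
 * Example (illustrative): watchpoints must be naturally aligned, which
 * is what the addr & (len - 1) test above enforces.  For len == 4,
 * addr == 0x1000 passes (0x1000 & 3 == 0), while addr == 0x1002 is
 * rejected with -EINVAL (0x1002 & 3 == 2).
 */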

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            /* DR6.BS: single-step trap */
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_injected == -1);

        /* pass to guest */
        env->exception_injected = arch_info->exception;
        env->has_error_code = 0;
    }

    return ret;
}
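
/*
 * Worked example (illustrative): dr6 == 0x0001 with
 * ((dr7 >> 16) & 0x3) == 0x1 means debug slot 0 fired as a write
 * watchpoint, so the loop above reports hw_watchpoint at
 * hw_breakpoint[0].addr with BP_MEM_WRITE and returns EXCP_DEBUG.
 */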

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        /* 0x0600: DR7.GE (bit 9) plus the always-one reserved bit 10 */
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
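
/*
 * Worked example (illustrative, using the type_code/len_code encodings
 * above): a single 4-byte write watchpoint in slot 0 produces
 *
 *     debugreg[7] = 0x0600          base: GE plus the always-one bit 10
 *                 | (2 << 0)        G0: globally enable slot 0
 *                 | (0x1 << 16)     R/W0 = 01b: break on data writes
 *                 | (0x3 << 18)     LEN0 = 11b: 4 bytes wide
 *                 = 0x000d0602
 */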

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}
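
/*
 * Note (illustrative): CPUID_EXT_VMX is bit 5 of ECX from CPUID leaf 1,
 * so on a VMX-capable host the check above reduces to
 * (ecx & (1 << 5)) != 0.
 */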

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on older Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    /* Tolerate emulation failures only for guest user code (CPL 3)
     * running in protected mode; stop in all other cases. */
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If the kernel can't do IRQ routing, the interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use MSI via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the IOAPIC is in QEMU and the LAPICs are in KVM, reserve
           MSI routes for signaling interrupts to the local APICs. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    int ret;
    if (machine_kernel_irqchip_split(ms)) {
        /* 24 is the number of pins served by the userspace IOAPIC
         * (IOAPIC_NUM_PINS), reserved here in the kernel routing table. */
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}

/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}
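
/*
 * Worked example (illustrative): for host device 0000:02:1f.3 the
 * packed ID above is (0 << 16) | (0x02 << 8) | PCI_DEVFN(0x1f, 3)
 * = 0x200 | 0xfb = 0x2fb, since PCI_DEVFN(0x1f, 3) == (0x1f << 3) | 3.
 */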

int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}

static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    /* Prefer the newer KVM_ASSIGN_DEV_IRQ ioctl; fall back to the
     * legacy KVM_ASSIGN_IRQ on kernels that predate it. */
    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}

int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}
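
/*
 * Usage sketch (illustrative, hypothetical caller): a typical pattern
 * is to mask the assigned device's INTx while the guest services the
 * interrupt and unmask it again afterwards.
 *
 *     kvm_device_intx_set_mask(s, dev_id, true);    // mask
 *     ...guest interrupt handler runs...
 *     kvm_device_intx_set_mask(s, dev_id, false);   // unmask
 */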

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                   KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                     KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                     KVM_DEV_IRQ_HOST_MSIX);
}
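
/*
 * Usage sketch (illustrative, hypothetical caller): typical MSI-X
 * bring-up with the helpers above, assuming virq[i] already holds a
 * routed virtual IRQ for each vector:
 *
 *     if (!kvm_device_msix_supported(s)) {
 *         return -ENOTSUP;            // fall back to MSI or INTx
 *     }
 *     kvm_device_msix_init_vectors(s, dev_id, nr_vectors);
 *     for (i = 0; i < nr_vectors; i++) {
 *         kvm_device_msix_set_vector(s, dev_id, i, virq[i]);
 *     }
 *     kvm_device_msix_assign(s, dev_id);
 */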

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        if (!class->int_remap) {
            return 0;
        }

        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;

        ret = class->int_remap(iommu, &src, &dst, dev ?
                               pci_requester_id(dev) :
                               X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}
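
/*
 * Worked example (illustrative, assuming VTD_MSI_ADDR_HI_SHIFT == 32
 * and VTD_MSI_ADDR_LO_MASK == 0xffffffff): a remapped message with
 * dst.address == 0x00000000fee01004 and dst.data == 0x4041 is written
 * back as address_hi = 0x0, address_lo = 0xfee01004, data = 0x4041.
 */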

typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
    QLIST_HEAD_INITIALIZER(msi_route_list);

static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        dev = entry->dev;
        if (!msix_enabled(dev) && !msi_enabled(dev)) {
            continue;
        }
        msg = pci_get_msi_message(dev, entry->vector);
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* The first time we add a route, register ourselves on the
         * IOMMU's IEC notify list if needed. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;
    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    /* On x86 the MSI data payload does not encode a GSI, so this hook
     * must never be reached. */
    abort();
}