/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "hyperv.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"

#include "exec/ioport.h"
#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "migration/migration.h"
#include "exec/memattrs.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_async_pf_en;
static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static bool has_msr_kvm_steal_time;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_mtrr;
static bool has_msr_xss;

static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(void *arg)
{
    CPUState *cpu = arg;

    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, cpu);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    return cpuid;
}

static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}


/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    }

    g_free(cpuid);

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}

typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr == RAM_ADDR_INVALID ||
            !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(cpu, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    X86CPU *cpu = X86_CPU(first_cpu);

    if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        hwaddr paddr;

        /* Hope we are lucky for AO MCE */
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr == RAM_ADDR_INVALID ||
            !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
                                                addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif

static bool hyperv_hypercall_available(X86CPU *cpu)
{
    return cpu->hyperv_vapic ||
           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
}

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
           (hyperv_hypercall_available(cpu) ||
            cpu->hyperv_time ||
            cpu->hyperv_relaxed_timing ||
            cpu->hyperv_crash ||
            cpu->hyperv_reset ||
            cpu->hyperv_vpindex ||
            cpu->hyperv_runtime ||
            cpu->hyperv_synic ||
            cpu->hyperv_stimer);
}

static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r;

    if (!env->tsc_khz) {
        return 0;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;
    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                       -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            error_report("warning: TSC frequency mismatch between "
                         "VM and host, and TSC scaling unavailable");
            return r;
        }
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } QEMU_PACKED cpuid_data;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int r;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    if (hyperv_enabled(cpu)) {
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
        if (!cpu->hyperv_vendor_id) {
            memcpy(signature, "Microsoft Hv", 12);
        } else {
            size_t len = strlen(cpu->hyperv_vendor_id);

            if (len > 12) {
                error_report("hv-vendor-id truncated to 12 characters");
                len = 12;
            }
            memset(signature, 0, 12);
            memcpy(signature, cpu->hyperv_vendor_id, len);
        }
        c->eax = HYPERV_CPUID_MIN;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_INTERFACE;
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_FEATURES;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        }
        if (cpu->hyperv_vapic) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
            has_msr_hv_vapic = true;
        }
        if (cpu->hyperv_time &&
            kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
            c->eax |= 0x200;
            has_msr_hv_tsc = true;
        }
        if (cpu->hyperv_crash && has_msr_hv_crash) {
            c->edx |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
        }
        c->edx |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        if (cpu->hyperv_reset && has_msr_hv_reset) {
            c->eax |= HV_X64_MSR_RESET_AVAILABLE;
        }
        if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
            c->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
        }
        if (cpu->hyperv_runtime && has_msr_hv_runtime) {
            c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
        }
        if (cpu->hyperv_synic) {
            int sint;

            if (!has_msr_hv_synic ||
                kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
                fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
                return -ENOSYS;
            }

            c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
            env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
            for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
                env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
            }
        }
        if (cpu->hyperv_stimer) {
            if (!has_msr_hv_stimer) {
                fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
                return -ENOSYS;
            }
            c->eax |= HV_X64_MSR_SYNTIMER_AVAILABLE;
        }
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
        if (has_msr_hv_vapic) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
        c->ebx = cpu->hyperv_spinlock_attempts;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;

        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];

        has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);

        has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);

        has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t ver;

        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
        if ((ver & 0xff) > 0) {
            has_msr_architectural_pmu = true;
            num_architectural_pmu_counters = (ver & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_counters = MAX_GP_COUNTERS;
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
                         unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    c = cpuid_find_entry(&cpuid_data.cpuid, 0x80000007, 0);
    if (c && (c->edx & 1<<8) && invtsc_mig_blocker == NULL) {
        /* for migration */
        error_setg(&invtsc_mig_blocker,
                   "State blocked by non-migratable CPU device"
                   " (invtsc flag)");
        migrate_add_blocker(invtsc_mig_blocker);
        /* for savevm */
        vmstate_x86_cpu.unmigratable = 1;
    }

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        return r;
    }

    /* vcpu's TSC frequency is either specified by user, or following
     * the value used by KVM if the former is not present. In the
     * latter case, we query it from KVM and record in env->tsc_khz,
     * so that vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    if (has_xsave) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }
    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        has_msr_mtrr = true;
    }
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    return 0;
}

caa5af0f 936{
20d695a9 937 CPUX86State *env = &cpu->env;
dd673288 938
e73223a5 939 env->exception_injected = -1;
0e607a80 940 env->interrupt_injected = -1;
1a5e9d2f 941 env->xcr0 = 1;
ddced198 942 if (kvm_irqchip_in_kernel()) {
dd673288 943 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
ddced198
MT
944 KVM_MP_STATE_UNINITIALIZED;
945 } else {
946 env->mp_state = KVM_MP_STATE_RUNNABLE;
947 }
caa5af0f
JK
948}
949
e0723c45
PB
950void kvm_arch_do_init_vcpu(X86CPU *cpu)
951{
952 CPUX86State *env = &cpu->env;
953
954 /* APs get directly into wait-for-SIPI state. */
955 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
956 env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
957 }
958}
959
c3a3a7d3 960static int kvm_get_supported_msrs(KVMState *s)
05330448 961{
75b10c43 962 static int kvm_supported_msrs;
c3a3a7d3 963 int ret = 0;
05330448
AL
964
965 /* first time */
75b10c43 966 if (kvm_supported_msrs == 0) {
05330448
AL
967 struct kvm_msr_list msr_list, *kvm_msr_list;
968
75b10c43 969 kvm_supported_msrs = -1;
05330448
AL
970
971 /* Obtain MSR list from KVM. These are the MSRs that we must
972 * save/restore */
4c9f7372 973 msr_list.nmsrs = 0;
c3a3a7d3 974 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
6fb6d245 975 if (ret < 0 && ret != -E2BIG) {
c3a3a7d3 976 return ret;
6fb6d245 977 }
d9db889f
JK
978 /* Old kernel modules had a bug and could write beyond the provided
979 memory. Allocate at least a safe amount of 1K. */
7267c094 980 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
d9db889f
JK
981 msr_list.nmsrs *
982 sizeof(msr_list.indices[0])));
05330448 983
55308450 984 kvm_msr_list->nmsrs = msr_list.nmsrs;
c3a3a7d3 985 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
05330448
AL
986 if (ret >= 0) {
987 int i;
988
989 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
990 if (kvm_msr_list->indices[i] == MSR_STAR) {
c3a3a7d3 991 has_msr_star = true;
75b10c43
MT
992 continue;
993 }
994 if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
c3a3a7d3 995 has_msr_hsave_pa = true;
75b10c43 996 continue;
05330448 997 }
c9b8f6b6
AS
998 if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
999 has_msr_tsc_aux = true;
1000 continue;
1001 }
f28558d3
WA
1002 if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
1003 has_msr_tsc_adjust = true;
1004 continue;
1005 }
aa82ba54
LJ
1006 if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
1007 has_msr_tsc_deadline = true;
1008 continue;
1009 }
fc12d72e
PB
1010 if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
1011 has_msr_smbase = true;
1012 continue;
1013 }
21e87c46
AK
1014 if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
1015 has_msr_misc_enable = true;
1016 continue;
1017 }
79e9ebeb
LJ
1018 if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
1019 has_msr_bndcfgs = true;
1020 continue;
1021 }
18cd2c17
WL
1022 if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
1023 has_msr_xss = true;
1024 continue;
1025 }
f2a53c9e
AS
1026 if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
1027 has_msr_hv_crash = true;
1028 continue;
1029 }
744b8a94
AS
1030 if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
1031 has_msr_hv_reset = true;
1032 continue;
1033 }
8c145d7c
AS
1034 if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
1035 has_msr_hv_vpindex = true;
1036 continue;
1037 }
46eb8f98
AS
1038 if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
1039 has_msr_hv_runtime = true;
1040 continue;
1041 }
866eea9a
AS
1042 if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
1043 has_msr_hv_synic = true;
1044 continue;
1045 }
ff99aa64
AS
1046 if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
1047 has_msr_hv_stimer = true;
1048 continue;
1049 }
05330448
AL
1050 }
1051 }
1052
7267c094 1053 g_free(kvm_msr_list);
05330448
AL
1054 }
1055
c3a3a7d3 1056 return ret;
05330448
AL
1057}
1058
6410848b
PB
1059static Notifier smram_machine_done;
1060static KVMMemoryListener smram_listener;
1061static AddressSpace smram_address_space;
1062static MemoryRegion smram_as_root;
1063static MemoryRegion smram_as_mem;
1064
1065static void register_smram_listener(Notifier *n, void *unused)
1066{
1067 MemoryRegion *smram =
1068 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
1069
1070 /* Outer container... */
1071 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
1072 memory_region_set_enabled(&smram_as_root, true);
1073
1074 /* ... with two regions inside: normal system memory with low
1075 * priority, and...
1076 */
1077 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
1078 get_system_memory(), 0, ~0ull);
1079 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
1080 memory_region_set_enabled(&smram_as_mem, true);
1081
1082 if (smram) {
1083 /* ... SMRAM with higher priority */
1084 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
1085 memory_region_set_enabled(smram, true);
1086 }
1087
1088 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
1089 kvm_memory_listener_register(kvm_state, &smram_listener,
1090 &smram_address_space, 1);
1091}
1092
b16565b3 1093int kvm_arch_init(MachineState *ms, KVMState *s)
20420430 1094{
11076198 1095 uint64_t identity_base = 0xfffbc000;
39d6960a 1096 uint64_t shadow_mem;
20420430 1097 int ret;
25d2e361 1098 struct utsname utsname;
20420430 1099
28143b40
TH
1100#ifdef KVM_CAP_XSAVE
1101 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
1102#endif
1103
1104#ifdef KVM_CAP_XCRS
1105 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
1106#endif
1107
1108#ifdef KVM_CAP_PIT_STATE2
1109 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
1110#endif
1111
c3a3a7d3 1112 ret = kvm_get_supported_msrs(s);
20420430 1113 if (ret < 0) {
20420430
SY
1114 return ret;
1115 }
25d2e361
MT
1116
1117 uname(&utsname);
1118 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
1119
4c5b10b7 1120 /*
11076198
JK
1121 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
1122 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
1123 * Since these must be part of guest physical memory, we need to allocate
1124 * them, both by setting their start addresses in the kernel and by
1125 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
1126 *
1127 * Older KVM versions may not support setting the identity map base. In
1128 * that case we need to stick with the default, i.e. a 256K maximum BIOS
1129 * size.
4c5b10b7 1130 */
11076198
JK
1131 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
1132 /* Allows up to 16M BIOSes. */
1133 identity_base = 0xfeffc000;
1134
1135 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
1136 if (ret < 0) {
1137 return ret;
1138 }
4c5b10b7 1139 }
e56ff191 1140
11076198
JK
1141 /* Set TSS base one page after EPT identity map. */
1142 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
20420430
SY
1143 if (ret < 0) {
1144 return ret;
1145 }
1146
11076198
JK
1147 /* Tell fw_cfg to notify the BIOS to reserve the range. */
1148 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
20420430 1149 if (ret < 0) {
11076198 1150 fprintf(stderr, "e820_add_entry() table is full\n");
20420430
SY
1151 return ret;
1152 }
3c85e74f 1153 qemu_register_reset(kvm_unpoison_all, NULL);
20420430 1154
4689b77b 1155 shadow_mem = machine_kvm_shadow_mem(ms);
36ad0e94
MA
1156 if (shadow_mem != -1) {
1157 shadow_mem /= 4096;
1158 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
1159 if (ret < 0) {
1160 return ret;
39d6960a
JK
1161 }
1162 }
6410848b
PB
1163
1164 if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
1165 smram_machine_done.notify = register_smram_listener;
1166 qemu_add_machine_init_done_notifier(&smram_machine_done);
1167 }
11076198 1168 return 0;
05330448 1169}
b9bec74b 1170
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    if (rhs->unusable) {
        lhs->flags = 0;
    } else {
        lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                     (rhs->present * DESC_P_MASK) |
                     (rhs->dpl << DESC_DPL_SHIFT) |
                     (rhs->db << DESC_B_SHIFT) |
                     (rhs->s * DESC_S_MASK) |
                     (rhs->l << DESC_L_SHIFT) |
                     (rhs->g * DESC_G_MASK) |
                     (rhs->avl * DESC_AVL_MASK);
    }
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}

#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
#define XSAVE_OPMASK      272
#define XSAVE_ZMM_Hi256   288
#define XSAVE_Hi16_ZMM    416
#define XSAVE_PKRU        672

#define XSAVE_BYTE_OFFSET(word_offset) \
    ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))

#define ASSERT_OFFSET(word_offset, field) \
    QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
                      offsetof(X86XSaveArea, field))

ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
ASSERT_OFFSET(XSAVE_PKRU, pkru_state);

static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    int i, r;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->legacy.fcw = cwd;
    xsave->legacy.fsw = swd;
    xsave->legacy.ftw = twd;
    xsave->legacy.fpop = env->fpop;
    xsave->legacy.fpip = env->fpip;
    xsave->legacy.fpdp = env->fpdp;
    memcpy(&xsave->legacy.fpregs, env->fpregs,
            sizeof env->fpregs);
    xsave->legacy.mxcsr = env->mxcsr;
    xsave->header.xstate_bv = env->xstate_bv;
    memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
            sizeof env->bnd_regs);
    xsave->bndcsr_state.bndcsr = env->bndcs_regs;
    memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
            sizeof env->opmask_regs);

    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = xsave->legacy.xmm_regs[i];
        uint8_t *ymmh = xsave->avx_state.ymmh[i];
        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        stq_p(xmm,     env->xmm_regs[i].ZMM_Q(0));
        stq_p(xmm+8,   env->xmm_regs[i].ZMM_Q(1));
        stq_p(ymmh,    env->xmm_regs[i].ZMM_Q(2));
        stq_p(ymmh+8,  env->xmm_regs[i].ZMM_Q(3));
        stq_p(zmmh,    env->xmm_regs[i].ZMM_Q(4));
        stq_p(zmmh+8,  env->xmm_regs[i].ZMM_Q(5));
        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
    }

#ifdef TARGET_X86_64
    memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
            16 * sizeof env->xmm_regs[16]);
    memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
#endif
    r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
    return r;
}

static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

static void kvm_msr_buf_reset(X86CPU *cpu)
{
    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}

static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
{
    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];

    assert((void *)(entry + 1) <= limit);

    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
    msrs->nmsrs++;
}

static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int ret;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    int ret;

    if (!has_msr_feature_control) {
        return 0;
    }

    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL,
                      cpu->env.msr_ia32_feature_control);

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

1bc22652 1531static int kvm_put_msrs(X86CPU *cpu, int level)
05330448 1532{
1bc22652 1533 CPUX86State *env = &cpu->env;
9c600a84 1534 int i;
48e1a45c 1535 int ret;
05330448 1536
d71b62a1
EH
1537 kvm_msr_buf_reset(cpu);
1538
9c600a84
EH
1539 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
1540 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
1541 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
1542 kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
c3a3a7d3 1543 if (has_msr_star) {
9c600a84 1544 kvm_msr_entry_add(cpu, MSR_STAR, env->star);
b9bec74b 1545 }
c3a3a7d3 1546 if (has_msr_hsave_pa) {
9c600a84 1547 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
b9bec74b 1548 }
c9b8f6b6 1549 if (has_msr_tsc_aux) {
9c600a84 1550 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
c9b8f6b6 1551 }
f28558d3 1552 if (has_msr_tsc_adjust) {
9c600a84 1553 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
f28558d3 1554 }
21e87c46 1555 if (has_msr_misc_enable) {
9c600a84 1556 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
21e87c46
AK
1557 env->msr_ia32_misc_enable);
1558 }
fc12d72e 1559 if (has_msr_smbase) {
9c600a84 1560 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
fc12d72e 1561 }
439d19f2 1562 if (has_msr_bndcfgs) {
9c600a84 1563 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
439d19f2 1564 }
18cd2c17 1565 if (has_msr_xss) {
9c600a84 1566 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
18cd2c17 1567 }
05330448 1568#ifdef TARGET_X86_64
25d2e361 1569 if (lm_capable_kernel) {
9c600a84
EH
1570 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
1571 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
1572 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
1573 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
25d2e361 1574 }
05330448 1575#endif
ff5c186b 1576 /*
0d894367
PB
1577 * The following MSRs have side effects on the guest or are too heavy
1578 * for normal writeback. Limit them to reset or full state updates.
ff5c186b
JK
1579 */
1580 if (level >= KVM_PUT_RESET_STATE) {
9c600a84
EH
1581 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
1582 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
1583 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
c5999bfc 1584 if (has_msr_async_pf_en) {
9c600a84 1585 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
c5999bfc 1586 }
bc9a839d 1587 if (has_msr_pv_eoi_en) {
9c600a84 1588 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
bc9a839d 1589 }
917367aa 1590 if (has_msr_kvm_steal_time) {
9c600a84 1591 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
917367aa 1592 }
0d894367
PB
1593 if (has_msr_architectural_pmu) {
1594 /* Stop the counter. */
9c600a84
EH
1595 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
1596 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
0d894367
PB
1597
1598 /* Set the counter values. */
1599 for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
9c600a84 1600 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
0d894367
PB
1601 env->msr_fixed_counters[i]);
1602 }
1603 for (i = 0; i < num_architectural_pmu_counters; i++) {
9c600a84 1604 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
0d894367 1605 env->msr_gp_counters[i]);
9c600a84 1606 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
0d894367
PB
1607 env->msr_gp_evtsel[i]);
1608 }
9c600a84 1609 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
0d894367 1610 env->msr_global_status);
9c600a84 1611 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
0d894367
PB
1612 env->msr_global_ovf_ctrl);
1613
1614 /* Now start the PMU. */
9c600a84 1615 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
0d894367 1616 env->msr_fixed_ctr_ctrl);
9c600a84 1617 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
0d894367
PB
1618 env->msr_global_ctrl);
1619 }
7bc3d711 1620 if (has_msr_hv_hypercall) {
9c600a84 1621 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
1c90ef26 1622 env->msr_hv_guest_os_id);
9c600a84 1623 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
1c90ef26 1624 env->msr_hv_hypercall);
eab70139 1625 }
7bc3d711 1626 if (has_msr_hv_vapic) {
9c600a84 1627 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
5ef68987 1628 env->msr_hv_vapic);
eab70139 1629 }
48a5f3bc 1630 if (has_msr_hv_tsc) {
9c600a84 1631 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
48a5f3bc 1632 }
f2a53c9e
AS
1633 if (has_msr_hv_crash) {
1634 int j;
1635
1636 for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
9c600a84 1637 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
f2a53c9e
AS
1638 env->msr_hv_crash_params[j]);
1639
9c600a84 1640 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
f2a53c9e
AS
1641 HV_X64_MSR_CRASH_CTL_NOTIFY);
1642 }
46eb8f98 1643 if (has_msr_hv_runtime) {
9c600a84 1644 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
46eb8f98 1645 }
866eea9a
AS
1646 if (cpu->hyperv_synic) {
1647 int j;
1648
9c600a84 1649 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
866eea9a 1650 env->msr_hv_synic_control);
9c600a84 1651 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
866eea9a 1652 env->msr_hv_synic_version);
9c600a84 1653 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
866eea9a 1654 env->msr_hv_synic_evt_page);
9c600a84 1655 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
866eea9a
AS
1656 env->msr_hv_synic_msg_page);
1657
1658 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
9c600a84 1659 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
866eea9a
AS
1660 env->msr_hv_synic_sint[j]);
1661 }
1662 }
ff99aa64
AS
1663 if (has_msr_hv_stimer) {
1664 int j;
1665
1666 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
9c600a84 1667 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
ff99aa64
AS
1668 env->msr_hv_stimer_config[j]);
1669 }
1670
1671 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
9c600a84 1672 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
ff99aa64
AS
1673 env->msr_hv_stimer_count[j]);
1674 }
1675 }
d1ae67f6 1676 if (has_msr_mtrr) {
9c600a84
EH
1677 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
1678 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
1679 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
1680 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
1681 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
1682 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
1683 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
1684 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
1685 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
1686 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
1687 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
1688 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
d1ae67f6 1689 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
9c600a84
EH
1690 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
1691 env->mtrr_var[i].base);
1692 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i),
1693 env->mtrr_var[i].mask);
d1ae67f6
AW
1694 }
1695 }
6bdf863d
JK
1696
1697 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
1698 * kvm_put_msr_feature_control. */
ea643051 1699 }
57780495 1700 if (env->mcg_cap) {
d8da8574 1701 int i;
b9bec74b 1702
9c600a84
EH
1703 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
1704 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
c34d440a 1705 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
9c600a84 1706 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
57780495
MT
1707 }
1708 }
1a03675d 1709
d71b62a1 1710 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
48e1a45c
PB
1711 if (ret < 0) {
1712 return ret;
1713 }
05330448 1714
9c600a84 1715 assert(ret == cpu->kvm_msr_buf->nmsrs);
48e1a45c 1716 return 0;
05330448
AL
1717}
1718
1719
1bc22652 1720static int kvm_get_fpu(X86CPU *cpu)
05330448 1721{
1bc22652 1722 CPUX86State *env = &cpu->env;
05330448
AL
1723 struct kvm_fpu fpu;
1724 int i, ret;
1725
1bc22652 1726 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
b9bec74b 1727 if (ret < 0) {
05330448 1728 return ret;
b9bec74b 1729 }
05330448
AL
1730
1731 env->fpstt = (fpu.fsw >> 11) & 7;
1732 env->fpus = fpu.fsw;
1733 env->fpuc = fpu.fcw;
42cc8fa6
JK
1734 env->fpop = fpu.last_opcode;
1735 env->fpip = fpu.last_ip;
1736 env->fpdp = fpu.last_dp;
b9bec74b
JK
1737 for (i = 0; i < 8; ++i) {
1738 env->fptags[i] = !((fpu.ftwx >> i) & 1);
1739 }
05330448 1740 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
bee81887 1741 for (i = 0; i < CPU_NB_REGS; i++) {
19cbd87c
EH
1742 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
1743 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
bee81887 1744 }
05330448
AL
1745 env->mxcsr = fpu.mxcsr;
1746
1747 return 0;
1748}
1749
1bc22652 1750static int kvm_get_xsave(X86CPU *cpu)
f1665b21 1751{
1bc22652 1752 CPUX86State *env = &cpu->env;
86cd2ea0 1753 X86XSaveArea *xsave = env->kvm_xsave_buf;
f1665b21 1754 int ret, i;
42cc8fa6 1755 uint16_t cwd, swd, twd;
f1665b21 1756
28143b40 1757 if (!has_xsave) {
1bc22652 1758 return kvm_get_fpu(cpu);
b9bec74b 1759 }
f1665b21 1760
1bc22652 1761 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
0f53994f 1762 if (ret < 0) {
f1665b21 1763 return ret;
0f53994f 1764 }
f1665b21 1765
86cd2ea0
EH
1766 cwd = xsave->legacy.fcw;
1767 swd = xsave->legacy.fsw;
1768 twd = xsave->legacy.ftw;
1769 env->fpop = xsave->legacy.fpop;
f1665b21
SY
1770 env->fpstt = (swd >> 11) & 7;
1771 env->fpus = swd;
1772 env->fpuc = cwd;
b9bec74b 1773 for (i = 0; i < 8; ++i) {
f1665b21 1774 env->fptags[i] = !((twd >> i) & 1);
b9bec74b 1775 }
86cd2ea0
EH
1776 env->fpip = xsave->legacy.fpip;
1777 env->fpdp = xsave->legacy.fpdp;
1778 env->mxcsr = xsave->legacy.mxcsr;
1779 memcpy(env->fpregs, &xsave->legacy.fpregs,
f1665b21 1780 sizeof env->fpregs);
86cd2ea0
EH
1781 env->xstate_bv = xsave->header.xstate_bv;
1782 memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
79e9ebeb 1783 sizeof env->bnd_regs);
86cd2ea0
EH
1784 env->bndcs_regs = xsave->bndcsr_state.bndcsr;
1785 memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
9aecd6f8 1786 sizeof env->opmask_regs);
bee81887 1787
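 /* The first 16 vector registers are scattered across the XSAVE area:
  * bits 0-127 live in the legacy SSE region, bits 128-255 in the AVX
  * (YMM_Hi128) component, and bits 256-511 in the ZMM_Hi256 component.
  * The loop below stitches the three pieces back into xmm_regs[i]. */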
86cd2ea0
EH
1788 for (i = 0; i < CPU_NB_REGS; i++) {
1789 uint8_t *xmm = xsave->legacy.xmm_regs[i];
1790 uint8_t *ymmh = xsave->avx_state.ymmh[i];
1791 uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
19cbd87c
EH
1792 env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
1793 env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
1794 env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
1795 env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
1796 env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
1797 env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
1798 env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
1799 env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
bee81887
PB
1800 }
1801
9aecd6f8 1802#ifdef TARGET_X86_64
86cd2ea0 1803 memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
b7711471 1804 16 * sizeof env->xmm_regs[16]);
86cd2ea0 1805 memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
9aecd6f8 1806#endif
f1665b21 1807 return 0;
f1665b21
SY
1808}
1809
1bc22652 1810static int kvm_get_xcrs(X86CPU *cpu)
f1665b21 1811{
1bc22652 1812 CPUX86State *env = &cpu->env;
f1665b21
SY
1813 int i, ret;
1814 struct kvm_xcrs xcrs;
1815
28143b40 1816 if (!has_xcrs) {
f1665b21 1817 return 0;
b9bec74b 1818 }
f1665b21 1819
1bc22652 1820 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
b9bec74b 1821 if (ret < 0) {
f1665b21 1822 return ret;
b9bec74b 1823 }
f1665b21 1824
b9bec74b 1825 for (i = 0; i < xcrs.nr_xcrs; i++) {
f1665b21 1826 /* Only support xcr0 now */
0fd53fec
PB
1827 if (xcrs.xcrs[i].xcr == 0) {
1828 env->xcr0 = xcrs.xcrs[i].value;
f1665b21
SY
1829 break;
1830 }
b9bec74b 1831 }
f1665b21 1832 return 0;
f1665b21
SY
1833}
1834
1bc22652 1835static int kvm_get_sregs(X86CPU *cpu)
05330448 1836{
1bc22652 1837 CPUX86State *env = &cpu->env;
05330448
AL
1838 struct kvm_sregs sregs;
1839 uint32_t hflags;
0e607a80 1840 int bit, i, ret;
05330448 1841
1bc22652 1842 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
b9bec74b 1843 if (ret < 0) {
05330448 1844 return ret;
b9bec74b 1845 }
05330448 1846
0e607a80
JK
1847 /* There can only be one pending IRQ set in the bitmap at a time, so try
1848 to find it and save its number instead (-1 for none). */
1849 env->interrupt_injected = -1;
1850 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
1851 if (sregs.interrupt_bitmap[i]) {
1852 bit = ctz64(sregs.interrupt_bitmap[i]);
1853 env->interrupt_injected = i * 64 + bit;
1854 break;
1855 }
1856 }
05330448
AL
1857
1858 get_seg(&env->segs[R_CS], &sregs.cs);
1859 get_seg(&env->segs[R_DS], &sregs.ds);
1860 get_seg(&env->segs[R_ES], &sregs.es);
1861 get_seg(&env->segs[R_FS], &sregs.fs);
1862 get_seg(&env->segs[R_GS], &sregs.gs);
1863 get_seg(&env->segs[R_SS], &sregs.ss);
1864
1865 get_seg(&env->tr, &sregs.tr);
1866 get_seg(&env->ldt, &sregs.ldt);
1867
1868 env->idt.limit = sregs.idt.limit;
1869 env->idt.base = sregs.idt.base;
1870 env->gdt.limit = sregs.gdt.limit;
1871 env->gdt.base = sregs.gdt.base;
1872
1873 env->cr[0] = sregs.cr0;
1874 env->cr[2] = sregs.cr2;
1875 env->cr[3] = sregs.cr3;
1876 env->cr[4] = sregs.cr4;
1877
05330448 1878 env->efer = sregs.efer;
cce47516
JK
1879
1880 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
05330448 1881
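 /* HFLAG_COPY_MASK keeps every hflag except the ones derived from the
  * segment and control registers (CPL, PE, MP, EM, TS, TF, VM, IOPL,
  * OSFXSR, LMA, CS32, SS32, CS64, ADDSEG); those are cleared here and
  * recomputed below from the sregs just fetched from KVM. */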
b9bec74b
JK
1882#define HFLAG_COPY_MASK \
1883 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
1884 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
1885 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
1886 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
05330448 1887
19dc85db
RH
1888 hflags = env->hflags & HFLAG_COPY_MASK;
1889 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
05330448
AL
1890 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
1891 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
b9bec74b 1892 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
05330448 1893 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
19dc85db
RH
1894
1895 if (env->cr[4] & CR4_OSFXSR_MASK) {
1896 hflags |= HF_OSFXSR_MASK;
1897 }
05330448
AL
1898
1899 if (env->efer & MSR_EFER_LMA) {
1900 hflags |= HF_LMA_MASK;
1901 }
1902
1903 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
1904 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
1905 } else {
1906 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
b9bec74b 1907 (DESC_B_SHIFT - HF_CS32_SHIFT);
05330448 1908 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
b9bec74b
JK
1909 (DESC_B_SHIFT - HF_SS32_SHIFT);
1910 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
1911 !(hflags & HF_CS32_MASK)) {
1912 hflags |= HF_ADDSEG_MASK;
1913 } else {
1914 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
1915 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
1916 }
05330448 1917 }
19dc85db 1918 env->hflags = hflags;
05330448
AL
1919
1920 return 0;
1921}
1922
1bc22652 1923static int kvm_get_msrs(X86CPU *cpu)
05330448 1924{
1bc22652 1925 CPUX86State *env = &cpu->env;
d71b62a1 1926 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
9c600a84 1927 int ret, i;
05330448 1928
d71b62a1
EH
1929 kvm_msr_buf_reset(cpu);
1930
9c600a84
EH
1931 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
1932 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
1933 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
1934 kvm_msr_entry_add(cpu, MSR_PAT, 0);
c3a3a7d3 1935 if (has_msr_star) {
9c600a84 1936 kvm_msr_entry_add(cpu, MSR_STAR, 0);
b9bec74b 1937 }
c3a3a7d3 1938 if (has_msr_hsave_pa) {
9c600a84 1939 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
b9bec74b 1940 }
c9b8f6b6 1941 if (has_msr_tsc_aux) {
9c600a84 1942 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
c9b8f6b6 1943 }
f28558d3 1944 if (has_msr_tsc_adjust) {
9c600a84 1945 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
f28558d3 1946 }
aa82ba54 1947 if (has_msr_tsc_deadline) {
9c600a84 1948 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
aa82ba54 1949 }
21e87c46 1950 if (has_msr_misc_enable) {
9c600a84 1951 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
21e87c46 1952 }
fc12d72e 1953 if (has_msr_smbase) {
9c600a84 1954 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
fc12d72e 1955 }
df67696e 1956 if (has_msr_feature_control) {
9c600a84 1957 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
df67696e 1958 }
79e9ebeb 1959 if (has_msr_bndcfgs) {
9c600a84 1960 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
79e9ebeb 1961 }
18cd2c17 1962 if (has_msr_xss) {
9c600a84 1963 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
18cd2c17
WL
1964 }
1965
b8cc45d6
GC
1966
1967 if (!env->tsc_valid) {
9c600a84 1968 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
1354869c 1969 env->tsc_valid = !runstate_is_running();
b8cc45d6
GC
1970 }
1971
05330448 1972#ifdef TARGET_X86_64
25d2e361 1973 if (lm_capable_kernel) {
9c600a84
EH
1974 kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
1975 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
1976 kvm_msr_entry_add(cpu, MSR_FMASK, 0);
1977 kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
25d2e361 1978 }
05330448 1979#endif
9c600a84
EH
1980 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
1981 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
c5999bfc 1982 if (has_msr_async_pf_en) {
9c600a84 1983 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
c5999bfc 1984 }
bc9a839d 1985 if (has_msr_pv_eoi_en) {
9c600a84 1986 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
bc9a839d 1987 }
917367aa 1988 if (has_msr_kvm_steal_time) {
9c600a84 1989 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
917367aa 1990 }
0d894367 1991 if (has_msr_architectural_pmu) {
9c600a84
EH
1992 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
1993 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
1994 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
1995 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
0d894367 1996 for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
9c600a84 1997 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
0d894367
PB
1998 }
1999 for (i = 0; i < num_architectural_pmu_counters; i++) {
9c600a84
EH
2000 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
2001 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
0d894367
PB
2002 }
2003 }
1a03675d 2004
57780495 2005 if (env->mcg_cap) {
9c600a84
EH
2006 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
2007 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
b9bec74b 2008 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
9c600a84 2009 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
b9bec74b 2010 }
57780495 2011 }
57780495 2012
1c90ef26 2013 if (has_msr_hv_hypercall) {
9c600a84
EH
2014 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
2015 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
1c90ef26 2016 }
5ef68987 2017 if (has_msr_hv_vapic) {
9c600a84 2018 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
5ef68987 2019 }
48a5f3bc 2020 if (has_msr_hv_tsc) {
9c600a84 2021 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
48a5f3bc 2022 }
f2a53c9e
AS
2023 if (has_msr_hv_crash) {
2024 int j;
2025
2026 for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
9c600a84 2027 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
f2a53c9e
AS
2028 }
2029 }
46eb8f98 2030 if (has_msr_hv_runtime) {
9c600a84 2031 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
46eb8f98 2032 }
866eea9a
AS
2033 if (cpu->hyperv_synic) {
2034 uint32_t msr;
2035
9c600a84
EH
2036 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
2037 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
2038 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
2039 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
866eea9a 2040 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
9c600a84 2041 kvm_msr_entry_add(cpu, msr, 0);
866eea9a
AS
2042 }
2043 }
ff99aa64
AS
2044 if (has_msr_hv_stimer) {
2045 uint32_t msr;
2046
2047 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
2048 msr++) {
9c600a84 2049 kvm_msr_entry_add(cpu, msr, 0);
ff99aa64
AS
2050 }
2051 }
d1ae67f6 2052 if (has_msr_mtrr) {
9c600a84
EH
2053 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
2054 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
2055 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
2056 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
2057 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
2058 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
2059 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
2060 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
2061 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
2062 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
2063 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
2064 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
d1ae67f6 2065 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
9c600a84
EH
2066 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
2067 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
d1ae67f6
AW
2068 }
2069 }
5ef68987 2070
d71b62a1 2071 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
b9bec74b 2072 if (ret < 0) {
05330448 2073 return ret;
b9bec74b 2074 }
05330448 2075
9c600a84 2076 assert(ret == cpu->kvm_msr_buf->nmsrs);
05330448 2077 for (i = 0; i < ret; i++) {
0d894367
PB
2078 uint32_t index = msrs[i].index;
2079 switch (index) {
05330448
AL
2080 case MSR_IA32_SYSENTER_CS:
2081 env->sysenter_cs = msrs[i].data;
2082 break;
2083 case MSR_IA32_SYSENTER_ESP:
2084 env->sysenter_esp = msrs[i].data;
2085 break;
2086 case MSR_IA32_SYSENTER_EIP:
2087 env->sysenter_eip = msrs[i].data;
2088 break;
0c03266a
JK
2089 case MSR_PAT:
2090 env->pat = msrs[i].data;
2091 break;
05330448
AL
2092 case MSR_STAR:
2093 env->star = msrs[i].data;
2094 break;
2095#ifdef TARGET_X86_64
2096 case MSR_CSTAR:
2097 env->cstar = msrs[i].data;
2098 break;
2099 case MSR_KERNELGSBASE:
2100 env->kernelgsbase = msrs[i].data;
2101 break;
2102 case MSR_FMASK:
2103 env->fmask = msrs[i].data;
2104 break;
2105 case MSR_LSTAR:
2106 env->lstar = msrs[i].data;
2107 break;
2108#endif
2109 case MSR_IA32_TSC:
2110 env->tsc = msrs[i].data;
2111 break;
c9b8f6b6
AS
2112 case MSR_TSC_AUX:
2113 env->tsc_aux = msrs[i].data;
2114 break;
f28558d3
WA
2115 case MSR_TSC_ADJUST:
2116 env->tsc_adjust = msrs[i].data;
2117 break;
aa82ba54
LJ
2118 case MSR_IA32_TSCDEADLINE:
2119 env->tsc_deadline = msrs[i].data;
2120 break;
aa851e36
MT
2121 case MSR_VM_HSAVE_PA:
2122 env->vm_hsave = msrs[i].data;
2123 break;
1a03675d
GC
2124 case MSR_KVM_SYSTEM_TIME:
2125 env->system_time_msr = msrs[i].data;
2126 break;
2127 case MSR_KVM_WALL_CLOCK:
2128 env->wall_clock_msr = msrs[i].data;
2129 break;
57780495
MT
2130 case MSR_MCG_STATUS:
2131 env->mcg_status = msrs[i].data;
2132 break;
2133 case MSR_MCG_CTL:
2134 env->mcg_ctl = msrs[i].data;
2135 break;
21e87c46
AK
2136 case MSR_IA32_MISC_ENABLE:
2137 env->msr_ia32_misc_enable = msrs[i].data;
2138 break;
fc12d72e
PB
2139 case MSR_IA32_SMBASE:
2140 env->smbase = msrs[i].data;
2141 break;
0779caeb
ACL
2142 case MSR_IA32_FEATURE_CONTROL:
2143 env->msr_ia32_feature_control = msrs[i].data;
df67696e 2144 break;
79e9ebeb
LJ
2145 case MSR_IA32_BNDCFGS:
2146 env->msr_bndcfgs = msrs[i].data;
2147 break;
18cd2c17
WL
2148 case MSR_IA32_XSS:
2149 env->xss = msrs[i].data;
2150 break;
57780495 2151 default:
57780495
MT
2152 if (msrs[i].index >= MSR_MC0_CTL &&
2153 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
2154 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
57780495 2155 }
d8da8574 2156 break;
f6584ee2
GN
2157 case MSR_KVM_ASYNC_PF_EN:
2158 env->async_pf_en_msr = msrs[i].data;
2159 break;
bc9a839d
MT
2160 case MSR_KVM_PV_EOI_EN:
2161 env->pv_eoi_en_msr = msrs[i].data;
2162 break;
917367aa
MT
2163 case MSR_KVM_STEAL_TIME:
2164 env->steal_time_msr = msrs[i].data;
2165 break;
0d894367
PB
2166 case MSR_CORE_PERF_FIXED_CTR_CTRL:
2167 env->msr_fixed_ctr_ctrl = msrs[i].data;
2168 break;
2169 case MSR_CORE_PERF_GLOBAL_CTRL:
2170 env->msr_global_ctrl = msrs[i].data;
2171 break;
2172 case MSR_CORE_PERF_GLOBAL_STATUS:
2173 env->msr_global_status = msrs[i].data;
2174 break;
2175 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
2176 env->msr_global_ovf_ctrl = msrs[i].data;
2177 break;
2178 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
2179 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
2180 break;
2181 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
2182 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
2183 break;
2184 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
2185 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
2186 break;
1c90ef26
VR
2187 case HV_X64_MSR_HYPERCALL:
2188 env->msr_hv_hypercall = msrs[i].data;
2189 break;
2190 case HV_X64_MSR_GUEST_OS_ID:
2191 env->msr_hv_guest_os_id = msrs[i].data;
2192 break;
5ef68987
VR
2193 case HV_X64_MSR_APIC_ASSIST_PAGE:
2194 env->msr_hv_vapic = msrs[i].data;
2195 break;
48a5f3bc
VR
2196 case HV_X64_MSR_REFERENCE_TSC:
2197 env->msr_hv_tsc = msrs[i].data;
2198 break;
f2a53c9e
AS
2199 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2200 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
2201 break;
46eb8f98
AS
2202 case HV_X64_MSR_VP_RUNTIME:
2203 env->msr_hv_runtime = msrs[i].data;
2204 break;
866eea9a
AS
2205 case HV_X64_MSR_SCONTROL:
2206 env->msr_hv_synic_control = msrs[i].data;
2207 break;
2208 case HV_X64_MSR_SVERSION:
2209 env->msr_hv_synic_version = msrs[i].data;
2210 break;
2211 case HV_X64_MSR_SIEFP:
2212 env->msr_hv_synic_evt_page = msrs[i].data;
2213 break;
2214 case HV_X64_MSR_SIMP:
2215 env->msr_hv_synic_msg_page = msrs[i].data;
2216 break;
2217 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
2218 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
ff99aa64
AS
2219 break;
2220 case HV_X64_MSR_STIMER0_CONFIG:
2221 case HV_X64_MSR_STIMER1_CONFIG:
2222 case HV_X64_MSR_STIMER2_CONFIG:
2223 case HV_X64_MSR_STIMER3_CONFIG:
2224 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
2225 msrs[i].data;
2226 break;
2227 case HV_X64_MSR_STIMER0_COUNT:
2228 case HV_X64_MSR_STIMER1_COUNT:
2229 case HV_X64_MSR_STIMER2_COUNT:
2230 case HV_X64_MSR_STIMER3_COUNT:
2231 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
2232 msrs[i].data;
866eea9a 2233 break;
d1ae67f6
AW
2234 case MSR_MTRRdefType:
2235 env->mtrr_deftype = msrs[i].data;
2236 break;
2237 case MSR_MTRRfix64K_00000:
2238 env->mtrr_fixed[0] = msrs[i].data;
2239 break;
2240 case MSR_MTRRfix16K_80000:
2241 env->mtrr_fixed[1] = msrs[i].data;
2242 break;
2243 case MSR_MTRRfix16K_A0000:
2244 env->mtrr_fixed[2] = msrs[i].data;
2245 break;
2246 case MSR_MTRRfix4K_C0000:
2247 env->mtrr_fixed[3] = msrs[i].data;
2248 break;
2249 case MSR_MTRRfix4K_C8000:
2250 env->mtrr_fixed[4] = msrs[i].data;
2251 break;
2252 case MSR_MTRRfix4K_D0000:
2253 env->mtrr_fixed[5] = msrs[i].data;
2254 break;
2255 case MSR_MTRRfix4K_D8000:
2256 env->mtrr_fixed[6] = msrs[i].data;
2257 break;
2258 case MSR_MTRRfix4K_E0000:
2259 env->mtrr_fixed[7] = msrs[i].data;
2260 break;
2261 case MSR_MTRRfix4K_E8000:
2262 env->mtrr_fixed[8] = msrs[i].data;
2263 break;
2264 case MSR_MTRRfix4K_F0000:
2265 env->mtrr_fixed[9] = msrs[i].data;
2266 break;
2267 case MSR_MTRRfix4K_F8000:
2268 env->mtrr_fixed[10] = msrs[i].data;
2269 break;
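 /* Variable-range MTRRs are laid out as interleaved base/mask MSR pairs
  * starting at MSR_MTRRphysBase(0): even MSR indices hold the base, odd
  * indices hold the mask, which is what the index & 1 test below relies
  * on. */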
2270 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
2271 if (index & 1) {
2272 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data;
2273 } else {
2274 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
2275 }
2276 break;
05330448
AL
2277 }
2278 }
2279
2280 return 0;
2281}
2282
1bc22652 2283static int kvm_put_mp_state(X86CPU *cpu)
9bdbe550 2284{
1bc22652 2285 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
9bdbe550 2286
1bc22652 2287 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
9bdbe550
HB
2288}
2289
23d02d9b 2290static int kvm_get_mp_state(X86CPU *cpu)
9bdbe550 2291{
259186a7 2292 CPUState *cs = CPU(cpu);
23d02d9b 2293 CPUX86State *env = &cpu->env;
9bdbe550
HB
2294 struct kvm_mp_state mp_state;
2295 int ret;
2296
259186a7 2297 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
9bdbe550
HB
2298 if (ret < 0) {
2299 return ret;
2300 }
2301 env->mp_state = mp_state.mp_state;
c14750e8 2302 if (kvm_irqchip_in_kernel()) {
259186a7 2303 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
c14750e8 2304 }
9bdbe550
HB
2305 return 0;
2306}
2307
1bc22652 2308static int kvm_get_apic(X86CPU *cpu)
680c1c6f 2309{
02e51483 2310 DeviceState *apic = cpu->apic_state;
680c1c6f
JK
2311 struct kvm_lapic_state kapic;
2312 int ret;
2313
3d4b2649 2314 if (apic && kvm_irqchip_in_kernel()) {
1bc22652 2315 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
680c1c6f
JK
2316 if (ret < 0) {
2317 return ret;
2318 }
2319
2320 kvm_get_apic_state(apic, &kapic);
2321 }
2322 return 0;
2323}
2324
1bc22652 2325static int kvm_put_apic(X86CPU *cpu)
680c1c6f 2326{
02e51483 2327 DeviceState *apic = cpu->apic_state;
680c1c6f
JK
2328 struct kvm_lapic_state kapic;
2329
3d4b2649 2330 if (apic && kvm_irqchip_in_kernel()) {
680c1c6f
JK
2331 kvm_put_apic_state(apic, &kapic);
2332
1bc22652 2333 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
680c1c6f
JK
2334 }
2335 return 0;
2336}
2337
1bc22652 2338static int kvm_put_vcpu_events(X86CPU *cpu, int level)
a0fb002c 2339{
fc12d72e 2340 CPUState *cs = CPU(cpu);
1bc22652 2341 CPUX86State *env = &cpu->env;
076796f8 2342 struct kvm_vcpu_events events = {};
a0fb002c
JK
2343
2344 if (!kvm_has_vcpu_events()) {
2345 return 0;
2346 }
2347
31827373
JK
2348 events.exception.injected = (env->exception_injected >= 0);
2349 events.exception.nr = env->exception_injected;
a0fb002c
JK
2350 events.exception.has_error_code = env->has_error_code;
2351 events.exception.error_code = env->error_code;
7e680753 2352 events.exception.pad = 0;
a0fb002c
JK
2353
2354 events.interrupt.injected = (env->interrupt_injected >= 0);
2355 events.interrupt.nr = env->interrupt_injected;
2356 events.interrupt.soft = env->soft_interrupt;
2357
2358 events.nmi.injected = env->nmi_injected;
2359 events.nmi.pending = env->nmi_pending;
2360 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
7e680753 2361 events.nmi.pad = 0;
a0fb002c
JK
2362
2363 events.sipi_vector = env->sipi_vector;
2364
fc12d72e
PB
2365 if (has_msr_smbase) {
2366 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
2367 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
2368 if (kvm_irqchip_in_kernel()) {
2369 /* As soon as these are moved to the kernel, remove them
2370 * from cs->interrupt_request.
2371 */
2372 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
2373 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
2374 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
2375 } else {
2376 /* Keep these in cs->interrupt_request. */
2377 events.smi.pending = 0;
2378 events.smi.latched_init = 0;
2379 }
2380 events.flags |= KVM_VCPUEVENT_VALID_SMM;
2381 }
2382
ea643051
JK
 2383 /* events.flags is already zero from the struct initializer; resetting it
      * here would throw away KVM_VCPUEVENT_VALID_SMM set just above. */
2384 if (level >= KVM_PUT_RESET_STATE) {
2385 events.flags |=
2386 KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
2387 }
aee028b9 2388
1bc22652 2389 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
a0fb002c
JK
2390}
2391
1bc22652 2392static int kvm_get_vcpu_events(X86CPU *cpu)
a0fb002c 2393{
1bc22652 2394 CPUX86State *env = &cpu->env;
a0fb002c
JK
2395 struct kvm_vcpu_events events;
2396 int ret;
2397
2398 if (!kvm_has_vcpu_events()) {
2399 return 0;
2400 }
2401
fc12d72e 2402 memset(&events, 0, sizeof(events));
1bc22652 2403 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
a0fb002c
JK
2404 if (ret < 0) {
2405 return ret;
2406 }
31827373 2407 env->exception_injected =
a0fb002c
JK
2408 events.exception.injected ? events.exception.nr : -1;
2409 env->has_error_code = events.exception.has_error_code;
2410 env->error_code = events.exception.error_code;
2411
2412 env->interrupt_injected =
2413 events.interrupt.injected ? events.interrupt.nr : -1;
2414 env->soft_interrupt = events.interrupt.soft;
2415
2416 env->nmi_injected = events.nmi.injected;
2417 env->nmi_pending = events.nmi.pending;
2418 if (events.nmi.masked) {
2419 env->hflags2 |= HF2_NMI_MASK;
2420 } else {
2421 env->hflags2 &= ~HF2_NMI_MASK;
2422 }
2423
fc12d72e
PB
2424 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
2425 if (events.smi.smm) {
2426 env->hflags |= HF_SMM_MASK;
2427 } else {
2428 env->hflags &= ~HF_SMM_MASK;
2429 }
2430 if (events.smi.pending) {
2431 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2432 } else {
2433 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2434 }
2435 if (events.smi.smm_inside_nmi) {
2436 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
2437 } else {
2438 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
2439 }
2440 if (events.smi.latched_init) {
2441 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2442 } else {
2443 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2444 }
2445 }
2446
a0fb002c 2447 env->sipi_vector = events.sipi_vector;
a0fb002c
JK
2448
2449 return 0;
2450}
2451
1bc22652 2452static int kvm_guest_debug_workarounds(X86CPU *cpu)
b0b1d690 2453{
ed2803da 2454 CPUState *cs = CPU(cpu);
1bc22652 2455 CPUX86State *env = &cpu->env;
b0b1d690 2456 int ret = 0;
b0b1d690
JK
2457 unsigned long reinject_trap = 0;
2458
2459 if (!kvm_has_vcpu_events()) {
2460 if (env->exception_injected == 1) {
2461 reinject_trap = KVM_GUESTDBG_INJECT_DB;
2462 } else if (env->exception_injected == 3) {
2463 reinject_trap = KVM_GUESTDBG_INJECT_BP;
2464 }
2465 env->exception_injected = -1;
2466 }
2467
2468 /*
2469 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
2470 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
2471 * by updating the debug state once again if single-stepping is on.
2472 * Another reason to call kvm_update_guest_debug here is a pending debug
 2473 * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
2474 * reinject them via SET_GUEST_DEBUG.
2475 */
2476 if (reinject_trap ||
ed2803da 2477 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
38e478ec 2478 ret = kvm_update_guest_debug(cs, reinject_trap);
b0b1d690 2479 }
b0b1d690
JK
2480 return ret;
2481}
2482
1bc22652 2483static int kvm_put_debugregs(X86CPU *cpu)
ff44f1a3 2484{
1bc22652 2485 CPUX86State *env = &cpu->env;
ff44f1a3
JK
2486 struct kvm_debugregs dbgregs;
2487 int i;
2488
2489 if (!kvm_has_debugregs()) {
2490 return 0;
2491 }
2492
2493 for (i = 0; i < 4; i++) {
2494 dbgregs.db[i] = env->dr[i];
2495 }
2496 dbgregs.dr6 = env->dr[6];
2497 dbgregs.dr7 = env->dr[7];
2498 dbgregs.flags = 0;
2499
1bc22652 2500 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
ff44f1a3
JK
2501}
2502
1bc22652 2503static int kvm_get_debugregs(X86CPU *cpu)
ff44f1a3 2504{
1bc22652 2505 CPUX86State *env = &cpu->env;
ff44f1a3
JK
2506 struct kvm_debugregs dbgregs;
2507 int i, ret;
2508
2509 if (!kvm_has_debugregs()) {
2510 return 0;
2511 }
2512
1bc22652 2513 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
ff44f1a3 2514 if (ret < 0) {
b9bec74b 2515 return ret;
ff44f1a3
JK
2516 }
2517 for (i = 0; i < 4; i++) {
2518 env->dr[i] = dbgregs.db[i];
2519 }
2520 env->dr[4] = env->dr[6] = dbgregs.dr6;
2521 env->dr[5] = env->dr[7] = dbgregs.dr7;
ff44f1a3
JK
2522
2523 return 0;
2524}
2525
20d695a9 2526int kvm_arch_put_registers(CPUState *cpu, int level)
05330448 2527{
20d695a9 2528 X86CPU *x86_cpu = X86_CPU(cpu);
05330448
AL
2529 int ret;
2530
2fa45344 2531 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
dbaa07c4 2532
48e1a45c 2533 if (level >= KVM_PUT_RESET_STATE) {
6bdf863d
JK
2534 ret = kvm_put_msr_feature_control(x86_cpu);
2535 if (ret < 0) {
2536 return ret;
2537 }
2538 }
2539
36f96c4b
HZ
2540 if (level == KVM_PUT_FULL_STATE) {
2541 /* We don't check for kvm_arch_set_tsc_khz() errors here,
2542 * because TSC frequency mismatch shouldn't abort migration,
 2543 * unless the user explicitly asked for a stricter TSC
2544 * setting (e.g. using an explicit "tsc-freq" option).
2545 */
2546 kvm_arch_set_tsc_khz(cpu);
2547 }
2548
1bc22652 2549 ret = kvm_getput_regs(x86_cpu, 1);
b9bec74b 2550 if (ret < 0) {
05330448 2551 return ret;
b9bec74b 2552 }
1bc22652 2553 ret = kvm_put_xsave(x86_cpu);
b9bec74b 2554 if (ret < 0) {
f1665b21 2555 return ret;
b9bec74b 2556 }
1bc22652 2557 ret = kvm_put_xcrs(x86_cpu);
b9bec74b 2558 if (ret < 0) {
05330448 2559 return ret;
b9bec74b 2560 }
1bc22652 2561 ret = kvm_put_sregs(x86_cpu);
b9bec74b 2562 if (ret < 0) {
05330448 2563 return ret;
b9bec74b 2564 }
ab443475 2565 /* must be before kvm_put_msrs */
1bc22652 2566 ret = kvm_inject_mce_oldstyle(x86_cpu);
ab443475
JK
2567 if (ret < 0) {
2568 return ret;
2569 }
1bc22652 2570 ret = kvm_put_msrs(x86_cpu, level);
b9bec74b 2571 if (ret < 0) {
05330448 2572 return ret;
b9bec74b 2573 }
ea643051 2574 if (level >= KVM_PUT_RESET_STATE) {
1bc22652 2575 ret = kvm_put_mp_state(x86_cpu);
b9bec74b 2576 if (ret < 0) {
ea643051 2577 return ret;
b9bec74b 2578 }
1bc22652 2579 ret = kvm_put_apic(x86_cpu);
680c1c6f
JK
2580 if (ret < 0) {
2581 return ret;
2582 }
ea643051 2583 }
7477cd38
MT
2584
2585 ret = kvm_put_tscdeadline_msr(x86_cpu);
2586 if (ret < 0) {
2587 return ret;
2588 }
2589
1bc22652 2590 ret = kvm_put_vcpu_events(x86_cpu, level);
b9bec74b 2591 if (ret < 0) {
a0fb002c 2592 return ret;
b9bec74b 2593 }
1bc22652 2594 ret = kvm_put_debugregs(x86_cpu);
b9bec74b 2595 if (ret < 0) {
b0b1d690 2596 return ret;
b9bec74b 2597 }
b0b1d690 2598 /* must be last */
1bc22652 2599 ret = kvm_guest_debug_workarounds(x86_cpu);
b9bec74b 2600 if (ret < 0) {
ff44f1a3 2601 return ret;
b9bec74b 2602 }
05330448
AL
2603 return 0;
2604}
2605
20d695a9 2606int kvm_arch_get_registers(CPUState *cs)
05330448 2607{
20d695a9 2608 X86CPU *cpu = X86_CPU(cs);
05330448
AL
2609 int ret;
2610
20d695a9 2611 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
dbaa07c4 2612
1bc22652 2613 ret = kvm_getput_regs(cpu, 0);
b9bec74b 2614 if (ret < 0) {
f4f1110e 2615 goto out;
b9bec74b 2616 }
1bc22652 2617 ret = kvm_get_xsave(cpu);
b9bec74b 2618 if (ret < 0) {
f4f1110e 2619 goto out;
b9bec74b 2620 }
1bc22652 2621 ret = kvm_get_xcrs(cpu);
b9bec74b 2622 if (ret < 0) {
f4f1110e 2623 goto out;
b9bec74b 2624 }
1bc22652 2625 ret = kvm_get_sregs(cpu);
b9bec74b 2626 if (ret < 0) {
f4f1110e 2627 goto out;
b9bec74b 2628 }
1bc22652 2629 ret = kvm_get_msrs(cpu);
b9bec74b 2630 if (ret < 0) {
f4f1110e 2631 goto out;
b9bec74b 2632 }
23d02d9b 2633 ret = kvm_get_mp_state(cpu);
b9bec74b 2634 if (ret < 0) {
f4f1110e 2635 goto out;
b9bec74b 2636 }
1bc22652 2637 ret = kvm_get_apic(cpu);
680c1c6f 2638 if (ret < 0) {
f4f1110e 2639 goto out;
680c1c6f 2640 }
1bc22652 2641 ret = kvm_get_vcpu_events(cpu);
b9bec74b 2642 if (ret < 0) {
f4f1110e 2643 goto out;
b9bec74b 2644 }
1bc22652 2645 ret = kvm_get_debugregs(cpu);
b9bec74b 2646 if (ret < 0) {
f4f1110e 2647 goto out;
b9bec74b 2648 }
f4f1110e
RH
2649 ret = 0;
2650 out:
2651 cpu_sync_bndcs_hflags(&cpu->env);
2652 return ret;
05330448
AL
2653}
2654
20d695a9 2655void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
05330448 2656{
20d695a9
AF
2657 X86CPU *x86_cpu = X86_CPU(cpu);
2658 CPUX86State *env = &x86_cpu->env;
ce377af3
JK
2659 int ret;
2660
276ce815 2661 /* Inject NMI */
fc12d72e
PB
2662 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
2663 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
2664 qemu_mutex_lock_iothread();
2665 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
2666 qemu_mutex_unlock_iothread();
2667 DPRINTF("injected NMI\n");
2668 ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
2669 if (ret < 0) {
2670 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
2671 strerror(-ret));
2672 }
2673 }
2674 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
2675 qemu_mutex_lock_iothread();
2676 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
2677 qemu_mutex_unlock_iothread();
2678 DPRINTF("injected SMI\n");
2679 ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
2680 if (ret < 0) {
2681 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
2682 strerror(-ret));
2683 }
ce377af3 2684 }
276ce815
LJ
2685 }
2686
15eafc2e 2687 if (!kvm_pic_in_kernel()) {
4b8523ee
JK
2688 qemu_mutex_lock_iothread();
2689 }
2690
e0723c45
PB
2691 /* Force the VCPU out of its inner loop to process any INIT requests
2692 * or (for userspace APIC, but it is cheap to combine the checks here)
2693 * pending TPR access reports.
2694 */
2695 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
fc12d72e
PB
2696 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
2697 !(env->hflags & HF_SMM_MASK)) {
2698 cpu->exit_request = 1;
2699 }
2700 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
2701 cpu->exit_request = 1;
2702 }
e0723c45 2703 }
05330448 2704
15eafc2e 2705 if (!kvm_pic_in_kernel()) {
db1669bc
JK
2706 /* Try to inject an interrupt if the guest can accept it */
2707 if (run->ready_for_interrupt_injection &&
259186a7 2708 (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
db1669bc
JK
2709 (env->eflags & IF_MASK)) {
2710 int irq;
2711
259186a7 2712 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
db1669bc
JK
2713 irq = cpu_get_pic_interrupt(env);
2714 if (irq >= 0) {
2715 struct kvm_interrupt intr;
2716
2717 intr.irq = irq;
db1669bc 2718 DPRINTF("injected interrupt %d\n", irq);
1bc22652 2719 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
ce377af3
JK
2720 if (ret < 0) {
2721 fprintf(stderr,
2722 "KVM: injection failed, interrupt lost (%s)\n",
2723 strerror(-ret));
2724 }
db1669bc
JK
2725 }
2726 }
05330448 2727
db1669bc
JK
2728 /* If we have an interrupt but the guest is not ready to receive an
2729 * interrupt, request an interrupt window exit. This will
2730 * cause a return to userspace as soon as the guest is ready to
2731 * receive interrupts. */
259186a7 2732 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
db1669bc
JK
2733 run->request_interrupt_window = 1;
2734 } else {
2735 run->request_interrupt_window = 0;
2736 }
2737
2738 DPRINTF("setting tpr\n");
02e51483 2739 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
4b8523ee
JK
2740
2741 qemu_mutex_unlock_iothread();
db1669bc 2742 }
05330448
AL
2743}
2744
4c663752 2745MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
05330448 2746{
20d695a9
AF
2747 X86CPU *x86_cpu = X86_CPU(cpu);
2748 CPUX86State *env = &x86_cpu->env;
2749
fc12d72e
PB
2750 if (run->flags & KVM_RUN_X86_SMM) {
2751 env->hflags |= HF_SMM_MASK;
2752 } else {
 2753 env->hflags &= ~HF_SMM_MASK;
2754 }
b9bec74b 2755 if (run->if_flag) {
05330448 2756 env->eflags |= IF_MASK;
b9bec74b 2757 } else {
05330448 2758 env->eflags &= ~IF_MASK;
b9bec74b 2759 }
4b8523ee
JK
2760
2761 /* We need to protect the apic state against concurrent accesses from
2762 * different threads in case the userspace irqchip is used. */
2763 if (!kvm_irqchip_in_kernel()) {
2764 qemu_mutex_lock_iothread();
2765 }
02e51483
CF
2766 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
2767 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
4b8523ee
JK
2768 if (!kvm_irqchip_in_kernel()) {
2769 qemu_mutex_unlock_iothread();
2770 }
f794aa4a 2771 return cpu_get_mem_attrs(env);
05330448
AL
2772}
2773
20d695a9 2774int kvm_arch_process_async_events(CPUState *cs)
0af691d7 2775{
20d695a9
AF
2776 X86CPU *cpu = X86_CPU(cs);
2777 CPUX86State *env = &cpu->env;
232fc23b 2778
259186a7 2779 if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
ab443475
JK
2780 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
2781 assert(env->mcg_cap);
2782
259186a7 2783 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
ab443475 2784
dd1750d7 2785 kvm_cpu_synchronize_state(cs);
ab443475
JK
2786
2787 if (env->exception_injected == EXCP08_DBLE) {
2788 /* this means triple fault */
2789 qemu_system_reset_request();
fcd7d003 2790 cs->exit_request = 1;
ab443475
JK
2791 return 0;
2792 }
2793 env->exception_injected = EXCP12_MCHK;
2794 env->has_error_code = 0;
2795
259186a7 2796 cs->halted = 0;
ab443475
JK
2797 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
2798 env->mp_state = KVM_MP_STATE_RUNNABLE;
2799 }
2800 }
2801
fc12d72e
PB
2802 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
2803 !(env->hflags & HF_SMM_MASK)) {
e0723c45
PB
2804 kvm_cpu_synchronize_state(cs);
2805 do_cpu_init(cpu);
2806 }
2807
db1669bc
JK
2808 if (kvm_irqchip_in_kernel()) {
2809 return 0;
2810 }
2811
259186a7
AF
2812 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
2813 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
02e51483 2814 apic_poll_irq(cpu->apic_state);
5d62c43a 2815 }
259186a7 2816 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4601f7b0 2817 (env->eflags & IF_MASK)) ||
259186a7
AF
2818 (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
2819 cs->halted = 0;
6792a57b 2820 }
259186a7 2821 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
dd1750d7 2822 kvm_cpu_synchronize_state(cs);
232fc23b 2823 do_cpu_sipi(cpu);
0af691d7 2824 }
259186a7
AF
2825 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
2826 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
dd1750d7 2827 kvm_cpu_synchronize_state(cs);
02e51483 2828 apic_handle_tpr_access_report(cpu->apic_state, env->eip,
d362e757
JK
2829 env->tpr_access_type);
2830 }
0af691d7 2831
259186a7 2832 return cs->halted;
0af691d7
MT
2833}
2834
839b5630 2835static int kvm_handle_halt(X86CPU *cpu)
05330448 2836{
259186a7 2837 CPUState *cs = CPU(cpu);
839b5630
AF
2838 CPUX86State *env = &cpu->env;
2839
259186a7 2840 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
05330448 2841 (env->eflags & IF_MASK)) &&
259186a7
AF
2842 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
2843 cs->halted = 1;
bb4ea393 2844 return EXCP_HLT;
05330448
AL
2845 }
2846
bb4ea393 2847 return 0;
05330448
AL
2848}
2849
f7575c96 2850static int kvm_handle_tpr_access(X86CPU *cpu)
d362e757 2851{
f7575c96
AF
2852 CPUState *cs = CPU(cpu);
2853 struct kvm_run *run = cs->kvm_run;
d362e757 2854
02e51483 2855 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
d362e757
JK
2856 run->tpr_access.is_write ? TPR_ACCESS_WRITE
2857 : TPR_ACCESS_READ);
2858 return 1;
2859}
2860
f17ec444 2861int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
e22a25c9 2862{
38972938 2863 static const uint8_t int3 = 0xcc;
64bf3f4e 2864
f17ec444
AF
2865 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
2866 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
e22a25c9 2867 return -EINVAL;
b9bec74b 2868 }
e22a25c9
AL
2869 return 0;
2870}
2871
f17ec444 2872int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
e22a25c9
AL
2873{
2874 uint8_t int3;
2875
f17ec444
AF
2876 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
2877 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
e22a25c9 2878 return -EINVAL;
b9bec74b 2879 }
e22a25c9
AL
2880 return 0;
2881}
2882
2883static struct {
2884 target_ulong addr;
2885 int len;
2886 int type;
2887} hw_breakpoint[4];
2888
2889static int nb_hw_breakpoint;
2890
2891static int find_hw_breakpoint(target_ulong addr, int len, int type)
2892{
2893 int n;
2894
b9bec74b 2895 for (n = 0; n < nb_hw_breakpoint; n++) {
e22a25c9 2896 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
b9bec74b 2897 (hw_breakpoint[n].len == len || len == -1)) {
e22a25c9 2898 return n;
b9bec74b
JK
2899 }
2900 }
e22a25c9
AL
2901 return -1;
2902}
2903
2904int kvm_arch_insert_hw_breakpoint(target_ulong addr,
2905 target_ulong len, int type)
2906{
2907 switch (type) {
2908 case GDB_BREAKPOINT_HW:
2909 len = 1;
2910 break;
2911 case GDB_WATCHPOINT_WRITE:
2912 case GDB_WATCHPOINT_ACCESS:
2913 switch (len) {
2914 case 1:
2915 break;
2916 case 2:
2917 case 4:
2918 case 8:
b9bec74b 2919 if (addr & (len - 1)) {
e22a25c9 2920 return -EINVAL;
b9bec74b 2921 }
e22a25c9
AL
2922 break;
2923 default:
2924 return -EINVAL;
2925 }
2926 break;
2927 default:
2928 return -ENOSYS;
2929 }
2930
b9bec74b 2931 if (nb_hw_breakpoint == 4) {
e22a25c9 2932 return -ENOBUFS;
b9bec74b
JK
2933 }
2934 if (find_hw_breakpoint(addr, len, type) >= 0) {
e22a25c9 2935 return -EEXIST;
b9bec74b 2936 }
e22a25c9
AL
2937 hw_breakpoint[nb_hw_breakpoint].addr = addr;
2938 hw_breakpoint[nb_hw_breakpoint].len = len;
2939 hw_breakpoint[nb_hw_breakpoint].type = type;
2940 nb_hw_breakpoint++;
2941
2942 return 0;
2943}
2944
2945int kvm_arch_remove_hw_breakpoint(target_ulong addr,
2946 target_ulong len, int type)
2947{
2948 int n;
2949
2950 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
b9bec74b 2951 if (n < 0) {
e22a25c9 2952 return -ENOENT;
b9bec74b 2953 }
e22a25c9
AL
2954 nb_hw_breakpoint--;
2955 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
2956
2957 return 0;
2958}
2959
2960void kvm_arch_remove_all_hw_breakpoints(void)
2961{
2962 nb_hw_breakpoint = 0;
2963}
2964
2965static CPUWatchpoint hw_watchpoint;
2966
a60f24b5 2967static int kvm_handle_debug(X86CPU *cpu,
48405526 2968 struct kvm_debug_exit_arch *arch_info)
e22a25c9 2969{
ed2803da 2970 CPUState *cs = CPU(cpu);
a60f24b5 2971 CPUX86State *env = &cpu->env;
f2574737 2972 int ret = 0;
e22a25c9
AL
2973 int n;
2974
2975 if (arch_info->exception == 1) {
2976 if (arch_info->dr6 & (1 << 14)) {
ed2803da 2977 if (cs->singlestep_enabled) {
f2574737 2978 ret = EXCP_DEBUG;
b9bec74b 2979 }
e22a25c9 2980 } else {
b9bec74b
JK
2981 for (n = 0; n < 4; n++) {
2982 if (arch_info->dr6 & (1 << n)) {
e22a25c9
AL
2983 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
2984 case 0x0:
f2574737 2985 ret = EXCP_DEBUG;
e22a25c9
AL
2986 break;
2987 case 0x1:
f2574737 2988 ret = EXCP_DEBUG;
ff4700b0 2989 cs->watchpoint_hit = &hw_watchpoint;
e22a25c9
AL
2990 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
2991 hw_watchpoint.flags = BP_MEM_WRITE;
2992 break;
2993 case 0x3:
f2574737 2994 ret = EXCP_DEBUG;
ff4700b0 2995 cs->watchpoint_hit = &hw_watchpoint;
e22a25c9
AL
2996 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
2997 hw_watchpoint.flags = BP_MEM_ACCESS;
2998 break;
2999 }
b9bec74b
JK
3000 }
3001 }
e22a25c9 3002 }
ff4700b0 3003 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
f2574737 3004 ret = EXCP_DEBUG;
b9bec74b 3005 }
f2574737 3006 if (ret == 0) {
ff4700b0 3007 cpu_synchronize_state(cs);
48405526 3008 assert(env->exception_injected == -1);
b0b1d690 3009
f2574737 3010 /* pass to guest */
48405526
BS
3011 env->exception_injected = arch_info->exception;
3012 env->has_error_code = 0;
b0b1d690 3013 }
e22a25c9 3014
f2574737 3015 return ret;
e22a25c9
AL
3016}
3017
20d695a9 3018void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
e22a25c9
AL
3019{
3020 const uint8_t type_code[] = {
3021 [GDB_BREAKPOINT_HW] = 0x0,
3022 [GDB_WATCHPOINT_WRITE] = 0x1,
3023 [GDB_WATCHPOINT_ACCESS] = 0x3
3024 };
3025 const uint8_t len_code[] = {
3026 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
3027 };
3028 int n;
3029
a60f24b5 3030 if (kvm_sw_breakpoints_active(cpu)) {
e22a25c9 3031 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
b9bec74b 3032 }
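 /* DR7 layout used below: 0x0600 sets the GE flag plus the always-one
  * reserved bit, 2 << (n * 2) is the global-enable bit for breakpoint n,
  * and each breakpoint's R/W and LEN fields occupy bits 16 + n * 4 and
  * 18 + n * 4 respectively, filled from the type_code/len_code tables. */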
e22a25c9
AL
3033 if (nb_hw_breakpoint > 0) {
3034 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
3035 dbg->arch.debugreg[7] = 0x0600;
3036 for (n = 0; n < nb_hw_breakpoint; n++) {
3037 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
3038 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
3039 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
95c077c9 3040 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
e22a25c9
AL
3041 }
3042 }
3043}
4513d923 3044
2a4dac83
JK
3045static bool host_supports_vmx(void)
3046{
3047 uint32_t ecx, unused;
3048
3049 host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
3050 return ecx & CPUID_EXT_VMX;
3051}
3052
3053#define VMX_INVALID_GUEST_STATE 0x80000021
3054
20d695a9 3055int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
2a4dac83 3056{
20d695a9 3057 X86CPU *cpu = X86_CPU(cs);
2a4dac83
JK
3058 uint64_t code;
3059 int ret;
3060
3061 switch (run->exit_reason) {
3062 case KVM_EXIT_HLT:
3063 DPRINTF("handle_hlt\n");
4b8523ee 3064 qemu_mutex_lock_iothread();
839b5630 3065 ret = kvm_handle_halt(cpu);
4b8523ee 3066 qemu_mutex_unlock_iothread();
2a4dac83
JK
3067 break;
3068 case KVM_EXIT_SET_TPR:
3069 ret = 0;
3070 break;
d362e757 3071 case KVM_EXIT_TPR_ACCESS:
4b8523ee 3072 qemu_mutex_lock_iothread();
f7575c96 3073 ret = kvm_handle_tpr_access(cpu);
4b8523ee 3074 qemu_mutex_unlock_iothread();
d362e757 3075 break;
2a4dac83
JK
3076 case KVM_EXIT_FAIL_ENTRY:
3077 code = run->fail_entry.hardware_entry_failure_reason;
3078 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
3079 code);
3080 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
3081 fprintf(stderr,
12619721 3082 "\nIf you're running a guest on an Intel machine without "
2a4dac83
JK
3083 "unrestricted mode\n"
3084 "support, the failure can be most likely due to the guest "
3085 "entering an invalid\n"
3086 "state for Intel VT. For example, the guest maybe running "
3087 "in big real mode\n"
3088 "which is not supported on less recent Intel processors."
3089 "\n\n");
3090 }
3091 ret = -1;
3092 break;
3093 case KVM_EXIT_EXCEPTION:
3094 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
3095 run->ex.exception, run->ex.error_code);
3096 ret = -1;
3097 break;
f2574737
JK
3098 case KVM_EXIT_DEBUG:
3099 DPRINTF("kvm_exit_debug\n");
4b8523ee 3100 qemu_mutex_lock_iothread();
a60f24b5 3101 ret = kvm_handle_debug(cpu, &run->debug.arch);
4b8523ee 3102 qemu_mutex_unlock_iothread();
f2574737 3103 break;
50efe82c
AS
3104 case KVM_EXIT_HYPERV:
3105 ret = kvm_hv_handle_exit(cpu, &run->hyperv);
3106 break;
15eafc2e
PB
3107 case KVM_EXIT_IOAPIC_EOI:
3108 ioapic_eoi_broadcast(run->eoi.vector);
3109 ret = 0;
3110 break;
2a4dac83
JK
3111 default:
3112 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
3113 ret = -1;
3114 break;
3115 }
3116
3117 return ret;
3118}
3119
20d695a9 3120bool kvm_arch_stop_on_emulation_error(CPUState *cs)
4513d923 3121{
20d695a9
AF
3122 X86CPU *cpu = X86_CPU(cs);
3123 CPUX86State *env = &cpu->env;
3124
dd1750d7 3125 kvm_cpu_synchronize_state(cs);
b9bec74b
JK
3126 return !(env->cr[0] & CR0_PE_MASK) ||
3127 ((env->segs[R_CS].selector & 3) != 3);
4513d923 3128}
84b058d7
JK
3129
3130void kvm_arch_init_irq_routing(KVMState *s)
3131{
3132 if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
 3133 /* If the kernel can't do IRQ routing, the interrupt source
 3134 * override 0->2 required by the HPET cannot be set up,
 3135 * so we have to disable the HPET.
3136 */
3137 no_hpet = 1;
3138 }
cc7e0ddf 3139 /* We know at this point that we're using the in-kernel
614e41bc 3140 * irqchip, so we can use irqfds, and on x86 we know
f3e1bed8 3141 * we can use msi via irqfd and GSI routing.
cc7e0ddf 3142 */
614e41bc 3143 kvm_msi_via_irqfd_allowed = true;
f3e1bed8 3144 kvm_gsi_routing_allowed = true;
15eafc2e
PB
3145
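 /* With a split irqchip the IOAPIC is emulated in QEMU while the local
  * APICs stay in the kernel, so each IOAPIC pin is delivered as an MSI
  * through the KVM GSI routing table. The dummy routes added below only
  * reserve one GSI per pin; the IOAPIC model later points them at real
  * MSI messages when its redirection table is programmed. */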
3146 if (kvm_irqchip_is_split()) {
3147 int i;
3148
3149 /* If the ioapic is in QEMU and the lapics are in KVM, reserve
3150 MSI routes for signaling interrupts to the local apics. */
3151 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
3152 struct MSIMessage msg = { 0x0, 0x0 };
3153 if (kvm_irqchip_add_msi_route(s, msg, NULL) < 0) {
3154 error_report("Could not enable split IRQ mode.");
3155 exit(1);
3156 }
3157 }
3158 }
3159}
3160
3161int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
3162{
3163 int ret;
3164 if (machine_kernel_irqchip_split(ms)) {
3165 ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
3166 if (ret) {
3167 error_report("Could not enable split irqchip mode: %s\n",
3168 strerror(-ret));
3169 exit(1);
3170 } else {
3171 DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
3172 kvm_split_irqchip = true;
3173 return 1;
3174 }
3175 } else {
3176 return 0;
3177 }
84b058d7 3178}
b139bd30
JK
3179
3180/* Classic KVM device assignment interface. Will remain x86 only. */
3181int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
3182 uint32_t flags, uint32_t *dev_id)
3183{
3184 struct kvm_assigned_pci_dev dev_data = {
3185 .segnr = dev_addr->domain,
3186 .busnr = dev_addr->bus,
3187 .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
3188 .flags = flags,
3189 };
3190 int ret;
3191
3192 dev_data.assigned_dev_id =
3193 (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
3194
3195 ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
3196 if (ret < 0) {
3197 return ret;
3198 }
3199
3200 *dev_id = dev_data.assigned_dev_id;
3201
3202 return 0;
3203}
3204
3205int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
3206{
3207 struct kvm_assigned_pci_dev dev_data = {
3208 .assigned_dev_id = dev_id,
3209 };
3210
3211 return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
3212}
3213
3214static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
3215 uint32_t irq_type, uint32_t guest_irq)
3216{
3217 struct kvm_assigned_irq assigned_irq = {
3218 .assigned_dev_id = dev_id,
3219 .guest_irq = guest_irq,
3220 .flags = irq_type,
3221 };
3222
3223 if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
3224 return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
3225 } else {
3226 return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
3227 }
3228}
3229
3230int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
3231 uint32_t guest_irq)
3232{
3233 uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
3234 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
3235
3236 return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
3237}
3238
3239int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
3240{
3241 struct kvm_assigned_pci_dev dev_data = {
3242 .assigned_dev_id = dev_id,
3243 .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
3244 };
3245
3246 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
3247}
3248
3249static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
3250 uint32_t type)
3251{
3252 struct kvm_assigned_irq assigned_irq = {
3253 .assigned_dev_id = dev_id,
3254 .flags = type,
3255 };
3256
3257 return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
3258}
3259
3260int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
3261{
3262 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
3263 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
3264}
3265
3266int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
3267{
3268 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
3269 KVM_DEV_IRQ_GUEST_MSI, virq);
3270}
3271
3272int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
3273{
3274 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
3275 KVM_DEV_IRQ_HOST_MSI);
3276}
3277
3278bool kvm_device_msix_supported(KVMState *s)
3279{
3280 /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
3281 * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
3282 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
3283}
3284
3285int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
3286 uint32_t nr_vectors)
3287{
3288 struct kvm_assigned_msix_nr msix_nr = {
3289 .assigned_dev_id = dev_id,
3290 .entry_nr = nr_vectors,
3291 };
3292
3293 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
3294}
3295
3296int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
3297 int virq)
3298{
3299 struct kvm_assigned_msix_entry msix_entry = {
3300 .assigned_dev_id = dev_id,
3301 .gsi = virq,
3302 .entry = vector,
3303 };
3304
3305 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
3306}
3307
3308int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
3309{
3310 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
3311 KVM_DEV_IRQ_GUEST_MSIX, 0);
3312}
3313
3314int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
3315{
3316 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
3317 KVM_DEV_IRQ_HOST_MSIX);
3318}
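/* Rough order in which a device-assignment caller (e.g. the legacy
 * pci-assign code) would use the MSI-X helpers above -- an illustrative
 * sketch, not code from this file:
 *
 *   kvm_device_pci_assign(s, &addr, flags, &dev_id);
 *   kvm_device_msix_init_vectors(s, dev_id, nr_vectors);
 *   for each vector:
 *       virq = kvm_irqchip_add_msi_route(s, msg, dev);
 *       kvm_device_msix_set_vector(s, dev_id, vector, virq);
 *   kvm_device_msix_assign(s, dev_id);
 *
 * Teardown goes through kvm_device_msix_deassign() and
 * kvm_device_pci_deassign().
 */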
9e03a040
FB
3319
3320int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
dc9f06ca 3321 uint64_t address, uint32_t data, PCIDevice *dev)
9e03a040
FB
3322{
3323 return 0;
3324}
1850b6b7
EA
3325
3326int kvm_arch_msi_data_to_gsi(uint32_t data)
3327{
3328 abort();
3329}