]> git.proxmox.com Git - mirror_qemu.git/blame - target-i386/kvm.c
kvm: Move x86-specific functions into target-i386/kvm.c
[mirror_qemu.git] / target-i386 / kvm.c
CommitLineData
05330448
AL
1/*
2 * QEMU KVM support
3 *
4 * Copyright (C) 2006-2008 Qumranet Technologies
5 * Copyright IBM, Corp. 2008
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 *
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
12 *
13 */
14
15#include <sys/types.h>
16#include <sys/ioctl.h>
17#include <sys/mman.h>
25d2e361 18#include <sys/utsname.h>
05330448
AL
19
20#include <linux/kvm.h>
5802e066 21#include <linux/kvm_para.h>
05330448
AL
22
23#include "qemu-common.h"
9c17d615 24#include "sysemu/sysemu.h"
6410848b 25#include "sysemu/kvm_int.h"
1d31f66b 26#include "kvm_i386.h"
05330448 27#include "cpu.h"
022c62cb 28#include "exec/gdbstub.h"
1de7afc9
PB
29#include "qemu/host-utils.h"
30#include "qemu/config-file.h"
0d09e41a
PB
31#include "hw/i386/pc.h"
32#include "hw/i386/apic.h"
e0723c45
PB
33#include "hw/i386/apic_internal.h"
34#include "hw/i386/apic-msidef.h"
022c62cb 35#include "exec/ioport.h"
73aa529a 36#include "standard-headers/asm-x86/hyperv.h"
a2cb15b0 37#include "hw/pci/pci.h"
68bfd0ad 38#include "migration/migration.h"
4c663752 39#include "exec/memattrs.h"
05330448
AL
40
41//#define DEBUG_KVM
42
43#ifdef DEBUG_KVM
8c0d577e 44#define DPRINTF(fmt, ...) \
05330448
AL
45 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
46#else
8c0d577e 47#define DPRINTF(fmt, ...) \
05330448
AL
48 do { } while (0)
49#endif
50
1a03675d
GC
51#define MSR_KVM_WALL_CLOCK 0x11
52#define MSR_KVM_SYSTEM_TIME 0x12
53
c0532a76
MT
54#ifndef BUS_MCEERR_AR
55#define BUS_MCEERR_AR 4
56#endif
57#ifndef BUS_MCEERR_AO
58#define BUS_MCEERR_AO 5
59#endif
60
94a8d39a
JK
61const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
62 KVM_CAP_INFO(SET_TSS_ADDR),
63 KVM_CAP_INFO(EXT_CPUID),
64 KVM_CAP_INFO(MP_STATE),
65 KVM_CAP_LAST_INFO
66};
25d2e361 67
c3a3a7d3
JK
68static bool has_msr_star;
69static bool has_msr_hsave_pa;
c9b8f6b6 70static bool has_msr_tsc_aux;
f28558d3 71static bool has_msr_tsc_adjust;
aa82ba54 72static bool has_msr_tsc_deadline;
df67696e 73static bool has_msr_feature_control;
c5999bfc 74static bool has_msr_async_pf_en;
bc9a839d 75static bool has_msr_pv_eoi_en;
21e87c46 76static bool has_msr_misc_enable;
fc12d72e 77static bool has_msr_smbase;
79e9ebeb 78static bool has_msr_bndcfgs;
917367aa 79static bool has_msr_kvm_steal_time;
25d2e361 80static int lm_capable_kernel;
7bc3d711
PB
81static bool has_msr_hv_hypercall;
82static bool has_msr_hv_vapic;
48a5f3bc 83static bool has_msr_hv_tsc;
f2a53c9e 84static bool has_msr_hv_crash;
744b8a94 85static bool has_msr_hv_reset;
8c145d7c 86static bool has_msr_hv_vpindex;
46eb8f98 87static bool has_msr_hv_runtime;
d1ae67f6 88static bool has_msr_mtrr;
18cd2c17 89static bool has_msr_xss;
b827df58 90
0d894367
PB
91static bool has_msr_architectural_pmu;
92static uint32_t num_architectural_pmu_counters;
93
28143b40
TH
94static int has_xsave;
95static int has_xcrs;
96static int has_pit_state2;
97
98int kvm_has_pit_state2(void)
99{
100 return has_pit_state2;
101}
102
355023f2
PB
103bool kvm_has_smm(void)
104{
105 return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
106}
107
1d31f66b
PM
108bool kvm_allows_irq0_override(void)
109{
110 return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
111}
112
b827df58
AK
113static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
114{
115 struct kvm_cpuid2 *cpuid;
116 int r, size;
117
118 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
e42a92ae 119 cpuid = g_malloc0(size);
b827df58
AK
120 cpuid->nent = max;
121 r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
76ae317f
MM
122 if (r == 0 && cpuid->nent >= max) {
123 r = -E2BIG;
124 }
b827df58
AK
125 if (r < 0) {
126 if (r == -E2BIG) {
7267c094 127 g_free(cpuid);
b827df58
AK
128 return NULL;
129 } else {
130 fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
131 strerror(-r));
132 exit(1);
133 }
134 }
135 return cpuid;
136}
137
dd87f8a6
EH
138/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
139 * for all entries.
140 */
141static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
142{
143 struct kvm_cpuid2 *cpuid;
144 int max = 1;
145 while ((cpuid = try_get_cpuid(s, max)) == NULL) {
146 max *= 2;
147 }
148 return cpuid;
149}
150
a443bc34 151static const struct kvm_para_features {
0c31b744
GC
152 int cap;
153 int feature;
154} para_features[] = {
155 { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
156 { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
157 { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
0c31b744 158 { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
0c31b744
GC
159};
160
ba9bc59e 161static int get_para_features(KVMState *s)
0c31b744
GC
162{
163 int i, features = 0;
164
8e03c100 165 for (i = 0; i < ARRAY_SIZE(para_features); i++) {
ba9bc59e 166 if (kvm_check_extension(s, para_features[i].cap)) {
0c31b744
GC
167 features |= (1 << para_features[i].feature);
168 }
169 }
170
171 return features;
172}
0c31b744
GC
173
174
829ae2f9
EH
175/* Returns the value for a specific register on the cpuid entry
176 */
177static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
178{
179 uint32_t ret = 0;
180 switch (reg) {
181 case R_EAX:
182 ret = entry->eax;
183 break;
184 case R_EBX:
185 ret = entry->ebx;
186 break;
187 case R_ECX:
188 ret = entry->ecx;
189 break;
190 case R_EDX:
191 ret = entry->edx;
192 break;
193 }
194 return ret;
195}
196
4fb73f1d
EH
197/* Find matching entry for function/index on kvm_cpuid2 struct
198 */
199static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
200 uint32_t function,
201 uint32_t index)
202{
203 int i;
204 for (i = 0; i < cpuid->nent; ++i) {
205 if (cpuid->entries[i].function == function &&
206 cpuid->entries[i].index == index) {
207 return &cpuid->entries[i];
208 }
209 }
210 /* not found: */
211 return NULL;
212}
213
ba9bc59e 214uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
c958a8bd 215 uint32_t index, int reg)
b827df58
AK
216{
217 struct kvm_cpuid2 *cpuid;
b827df58
AK
218 uint32_t ret = 0;
219 uint32_t cpuid_1_edx;
8c723b79 220 bool found = false;
b827df58 221
dd87f8a6 222 cpuid = get_supported_cpuid(s);
b827df58 223
4fb73f1d
EH
224 struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
225 if (entry) {
226 found = true;
227 ret = cpuid_entry_get_reg(entry, reg);
b827df58
AK
228 }
229
7b46e5ce
EH
230 /* Fixups for the data returned by KVM, below */
231
c2acb022
EH
232 if (function == 1 && reg == R_EDX) {
233 /* KVM before 2.6.30 misreports the following features */
234 ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
84bd945c
EH
235 } else if (function == 1 && reg == R_ECX) {
236 /* We can set the hypervisor flag, even if KVM does not return it on
237 * GET_SUPPORTED_CPUID
238 */
239 ret |= CPUID_EXT_HYPERVISOR;
ac67ee26
EH
240 /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
241 * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
242 * and the irqchip is in the kernel.
243 */
244 if (kvm_irqchip_in_kernel() &&
245 kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
246 ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
247 }
41e5e76d
EH
248
249 /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
250 * without the in-kernel irqchip
251 */
252 if (!kvm_irqchip_in_kernel()) {
253 ret &= ~CPUID_EXT_X2APIC;
b827df58 254 }
28b8e4d0
JK
255 } else if (function == 6 && reg == R_EAX) {
256 ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
c2acb022
EH
257 } else if (function == 0x80000001 && reg == R_EDX) {
258 /* On Intel, kvm returns cpuid according to the Intel spec,
259 * so add missing bits according to the AMD spec:
260 */
261 cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
262 ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
b827df58
AK
263 }
264
7267c094 265 g_free(cpuid);
b827df58 266
0c31b744 267 /* fallback for older kernels */
8c723b79 268 if ((function == KVM_CPUID_FEATURES) && !found) {
ba9bc59e 269 ret = get_para_features(s);
b9bec74b 270 }
0c31b744
GC
271
272 return ret;
bb0300dc 273}
bb0300dc 274
3c85e74f
HY
275typedef struct HWPoisonPage {
276 ram_addr_t ram_addr;
277 QLIST_ENTRY(HWPoisonPage) list;
278} HWPoisonPage;
279
280static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
281 QLIST_HEAD_INITIALIZER(hwpoison_page_list);
282
283static void kvm_unpoison_all(void *param)
284{
285 HWPoisonPage *page, *next_page;
286
287 QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
288 QLIST_REMOVE(page, list);
289 qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
7267c094 290 g_free(page);
3c85e74f
HY
291 }
292}
293
3c85e74f
HY
294static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
295{
296 HWPoisonPage *page;
297
298 QLIST_FOREACH(page, &hwpoison_page_list, list) {
299 if (page->ram_addr == ram_addr) {
300 return;
301 }
302 }
ab3ad07f 303 page = g_new(HWPoisonPage, 1);
3c85e74f
HY
304 page->ram_addr = ram_addr;
305 QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
306}
307
e7701825
MT
308static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
309 int *max_banks)
310{
311 int r;
312
14a09518 313 r = kvm_check_extension(s, KVM_CAP_MCE);
e7701825
MT
314 if (r > 0) {
315 *max_banks = r;
316 return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
317 }
318 return -ENOSYS;
319}
320
bee615d4 321static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
e7701825 322{
bee615d4 323 CPUX86State *env = &cpu->env;
c34d440a
JK
324 uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
325 MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
326 uint64_t mcg_status = MCG_STATUS_MCIP;
e7701825 327
c34d440a
JK
328 if (code == BUS_MCEERR_AR) {
329 status |= MCI_STATUS_AR | 0x134;
330 mcg_status |= MCG_STATUS_EIPV;
331 } else {
332 status |= 0xc0;
333 mcg_status |= MCG_STATUS_RIPV;
419fb20a 334 }
8c5cf3b6 335 cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
c34d440a
JK
336 (MCM_ADDR_PHYS << 6) | 0xc,
337 cpu_x86_support_mca_broadcast(env) ?
338 MCE_INJECT_BROADCAST : 0);
419fb20a 339}
419fb20a
JK
340
341static void hardware_memory_error(void)
342{
343 fprintf(stderr, "Hardware memory error!\n");
344 exit(1);
345}
346
20d695a9 347int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
419fb20a 348{
20d695a9
AF
349 X86CPU *cpu = X86_CPU(c);
350 CPUX86State *env = &cpu->env;
419fb20a 351 ram_addr_t ram_addr;
a8170e5e 352 hwaddr paddr;
419fb20a
JK
353
354 if ((env->mcg_cap & MCG_SER_P) && addr
c34d440a 355 && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
1b5ec234 356 if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
a60f24b5 357 !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
419fb20a
JK
358 fprintf(stderr, "Hardware memory error for memory used by "
359 "QEMU itself instead of guest system!\n");
360 /* Hope we are lucky for AO MCE */
361 if (code == BUS_MCEERR_AO) {
362 return 0;
363 } else {
364 hardware_memory_error();
365 }
366 }
3c85e74f 367 kvm_hwpoison_page_add(ram_addr);
bee615d4 368 kvm_mce_inject(cpu, paddr, code);
e56ff191 369 } else {
419fb20a
JK
370 if (code == BUS_MCEERR_AO) {
371 return 0;
372 } else if (code == BUS_MCEERR_AR) {
373 hardware_memory_error();
374 } else {
375 return 1;
376 }
377 }
378 return 0;
379}
380
381int kvm_arch_on_sigbus(int code, void *addr)
382{
182735ef
AF
383 X86CPU *cpu = X86_CPU(first_cpu);
384
385 if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
419fb20a 386 ram_addr_t ram_addr;
a8170e5e 387 hwaddr paddr;
419fb20a
JK
388
389 /* Hope we are lucky for AO MCE */
1b5ec234 390 if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
182735ef 391 !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
a60f24b5 392 addr, &paddr)) {
419fb20a
JK
393 fprintf(stderr, "Hardware memory error for memory used by "
394 "QEMU itself instead of guest system!: %p\n", addr);
395 return 0;
396 }
3c85e74f 397 kvm_hwpoison_page_add(ram_addr);
182735ef 398 kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
e56ff191 399 } else {
419fb20a
JK
400 if (code == BUS_MCEERR_AO) {
401 return 0;
402 } else if (code == BUS_MCEERR_AR) {
403 hardware_memory_error();
404 } else {
405 return 1;
406 }
407 }
408 return 0;
409}
e7701825 410
1bc22652 411static int kvm_inject_mce_oldstyle(X86CPU *cpu)
ab443475 412{
1bc22652
AF
413 CPUX86State *env = &cpu->env;
414
ab443475
JK
415 if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
416 unsigned int bank, bank_num = env->mcg_cap & 0xff;
417 struct kvm_x86_mce mce;
418
419 env->exception_injected = -1;
420
421 /*
422 * There must be at least one bank in use if an MCE is pending.
423 * Find it and use its values for the event injection.
424 */
425 for (bank = 0; bank < bank_num; bank++) {
426 if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
427 break;
428 }
429 }
430 assert(bank < bank_num);
431
432 mce.bank = bank;
433 mce.status = env->mce_banks[bank * 4 + 1];
434 mce.mcg_status = env->mcg_status;
435 mce.addr = env->mce_banks[bank * 4 + 2];
436 mce.misc = env->mce_banks[bank * 4 + 3];
437
1bc22652 438 return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
ab443475 439 }
ab443475
JK
440 return 0;
441}
442
1dfb4dd9 443static void cpu_update_state(void *opaque, int running, RunState state)
b8cc45d6 444{
317ac620 445 CPUX86State *env = opaque;
b8cc45d6
GC
446
447 if (running) {
448 env->tsc_valid = false;
449 }
450}
451
83b17af5 452unsigned long kvm_arch_vcpu_id(CPUState *cs)
b164e48e 453{
83b17af5 454 X86CPU *cpu = X86_CPU(cs);
7e72a45c 455 return cpu->apic_id;
b164e48e
EH
456}
457
92067bf4
IM
458#ifndef KVM_CPUID_SIGNATURE_NEXT
459#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
460#endif
461
462static bool hyperv_hypercall_available(X86CPU *cpu)
463{
464 return cpu->hyperv_vapic ||
465 (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
466}
467
468static bool hyperv_enabled(X86CPU *cpu)
469{
7bc3d711
PB
470 CPUState *cs = CPU(cpu);
471 return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
472 (hyperv_hypercall_available(cpu) ||
48a5f3bc 473 cpu->hyperv_time ||
f2a53c9e 474 cpu->hyperv_relaxed_timing ||
744b8a94 475 cpu->hyperv_crash ||
8c145d7c 476 cpu->hyperv_reset ||
46eb8f98
AS
477 cpu->hyperv_vpindex ||
478 cpu->hyperv_runtime);
92067bf4
IM
479}
480
68bfd0ad
MT
481static Error *invtsc_mig_blocker;
482
f8bb0565 483#define KVM_MAX_CPUID_ENTRIES 100
0893d460 484
20d695a9 485int kvm_arch_init_vcpu(CPUState *cs)
05330448
AL
486{
487 struct {
486bd5a2 488 struct kvm_cpuid2 cpuid;
f8bb0565 489 struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
541dc0d4 490 } QEMU_PACKED cpuid_data;
20d695a9
AF
491 X86CPU *cpu = X86_CPU(cs);
492 CPUX86State *env = &cpu->env;
486bd5a2 493 uint32_t limit, i, j, cpuid_i;
a33609ca 494 uint32_t unused;
bb0300dc 495 struct kvm_cpuid_entry2 *c;
bb0300dc 496 uint32_t signature[3];
234cc647 497 int kvm_base = KVM_CPUID_SIGNATURE;
e7429073 498 int r;
05330448 499
ef4cbe14
SW
500 memset(&cpuid_data, 0, sizeof(cpuid_data));
501
05330448
AL
502 cpuid_i = 0;
503
bb0300dc 504 /* Paravirtualization CPUIDs */
234cc647
PB
505 if (hyperv_enabled(cpu)) {
506 c = &cpuid_data.entries[cpuid_i++];
507 c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
eab70139
VR
508 memcpy(signature, "Microsoft Hv", 12);
509 c->eax = HYPERV_CPUID_MIN;
234cc647
PB
510 c->ebx = signature[0];
511 c->ecx = signature[1];
512 c->edx = signature[2];
0c31b744 513
234cc647
PB
514 c = &cpuid_data.entries[cpuid_i++];
515 c->function = HYPERV_CPUID_INTERFACE;
eab70139
VR
516 memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
517 c->eax = signature[0];
234cc647
PB
518 c->ebx = 0;
519 c->ecx = 0;
520 c->edx = 0;
eab70139
VR
521
522 c = &cpuid_data.entries[cpuid_i++];
eab70139
VR
523 c->function = HYPERV_CPUID_VERSION;
524 c->eax = 0x00001bbc;
525 c->ebx = 0x00060001;
526
527 c = &cpuid_data.entries[cpuid_i++];
eab70139 528 c->function = HYPERV_CPUID_FEATURES;
92067bf4 529 if (cpu->hyperv_relaxed_timing) {
eab70139
VR
530 c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
531 }
92067bf4 532 if (cpu->hyperv_vapic) {
eab70139
VR
533 c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
534 c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
7bc3d711 535 has_msr_hv_vapic = true;
eab70139 536 }
48a5f3bc
VR
537 if (cpu->hyperv_time &&
538 kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
539 c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
540 c->eax |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
541 c->eax |= 0x200;
542 has_msr_hv_tsc = true;
543 }
f2a53c9e
AS
544 if (cpu->hyperv_crash && has_msr_hv_crash) {
545 c->edx |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
546 }
744b8a94
AS
547 if (cpu->hyperv_reset && has_msr_hv_reset) {
548 c->eax |= HV_X64_MSR_RESET_AVAILABLE;
549 }
8c145d7c
AS
550 if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
551 c->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
552 }
46eb8f98
AS
553 if (cpu->hyperv_runtime && has_msr_hv_runtime) {
554 c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
555 }
eab70139 556 c = &cpuid_data.entries[cpuid_i++];
eab70139 557 c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
92067bf4 558 if (cpu->hyperv_relaxed_timing) {
eab70139
VR
559 c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
560 }
7bc3d711 561 if (has_msr_hv_vapic) {
eab70139
VR
562 c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
563 }
92067bf4 564 c->ebx = cpu->hyperv_spinlock_attempts;
eab70139
VR
565
566 c = &cpuid_data.entries[cpuid_i++];
eab70139
VR
567 c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
568 c->eax = 0x40;
569 c->ebx = 0x40;
570
234cc647 571 kvm_base = KVM_CPUID_SIGNATURE_NEXT;
7bc3d711 572 has_msr_hv_hypercall = true;
eab70139
VR
573 }
574
f522d2ac
AW
575 if (cpu->expose_kvm) {
576 memcpy(signature, "KVMKVMKVM\0\0\0", 12);
577 c = &cpuid_data.entries[cpuid_i++];
578 c->function = KVM_CPUID_SIGNATURE | kvm_base;
79b6f2f6 579 c->eax = KVM_CPUID_FEATURES | kvm_base;
f522d2ac
AW
580 c->ebx = signature[0];
581 c->ecx = signature[1];
582 c->edx = signature[2];
234cc647 583
f522d2ac
AW
584 c = &cpuid_data.entries[cpuid_i++];
585 c->function = KVM_CPUID_FEATURES | kvm_base;
586 c->eax = env->features[FEAT_KVM];
234cc647 587
f522d2ac 588 has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
bb0300dc 589
f522d2ac 590 has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
bc9a839d 591
f522d2ac
AW
592 has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
593 }
917367aa 594
a33609ca 595 cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
05330448
AL
596
597 for (i = 0; i <= limit; i++) {
f8bb0565
IM
598 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
599 fprintf(stderr, "unsupported level value: 0x%x\n", limit);
600 abort();
601 }
bb0300dc 602 c = &cpuid_data.entries[cpuid_i++];
486bd5a2
AL
603
604 switch (i) {
a36b1029
AL
605 case 2: {
606 /* Keep reading function 2 till all the input is received */
607 int times;
608
a36b1029 609 c->function = i;
a33609ca
AL
610 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
611 KVM_CPUID_FLAG_STATE_READ_NEXT;
612 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
613 times = c->eax & 0xff;
a36b1029
AL
614
615 for (j = 1; j < times; ++j) {
f8bb0565
IM
616 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
617 fprintf(stderr, "cpuid_data is full, no space for "
618 "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
619 abort();
620 }
a33609ca 621 c = &cpuid_data.entries[cpuid_i++];
a36b1029 622 c->function = i;
a33609ca
AL
623 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
624 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
a36b1029
AL
625 }
626 break;
627 }
486bd5a2
AL
628 case 4:
629 case 0xb:
630 case 0xd:
631 for (j = 0; ; j++) {
31e8c696
AP
632 if (i == 0xd && j == 64) {
633 break;
634 }
486bd5a2
AL
635 c->function = i;
636 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
637 c->index = j;
a33609ca 638 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
486bd5a2 639
b9bec74b 640 if (i == 4 && c->eax == 0) {
486bd5a2 641 break;
b9bec74b
JK
642 }
643 if (i == 0xb && !(c->ecx & 0xff00)) {
486bd5a2 644 break;
b9bec74b
JK
645 }
646 if (i == 0xd && c->eax == 0) {
31e8c696 647 continue;
b9bec74b 648 }
f8bb0565
IM
649 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
650 fprintf(stderr, "cpuid_data is full, no space for "
651 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
652 abort();
653 }
a33609ca 654 c = &cpuid_data.entries[cpuid_i++];
486bd5a2
AL
655 }
656 break;
657 default:
486bd5a2 658 c->function = i;
a33609ca
AL
659 c->flags = 0;
660 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
486bd5a2
AL
661 break;
662 }
05330448 663 }
0d894367
PB
664
665 if (limit >= 0x0a) {
666 uint32_t ver;
667
668 cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
669 if ((ver & 0xff) > 0) {
670 has_msr_architectural_pmu = true;
671 num_architectural_pmu_counters = (ver & 0xff00) >> 8;
672
673 /* Shouldn't be more than 32, since that's the number of bits
674 * available in EBX to tell us _which_ counters are available.
675 * Play it safe.
676 */
677 if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
678 num_architectural_pmu_counters = MAX_GP_COUNTERS;
679 }
680 }
681 }
682
a33609ca 683 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
05330448
AL
684
685 for (i = 0x80000000; i <= limit; i++) {
f8bb0565
IM
686 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
687 fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
688 abort();
689 }
bb0300dc 690 c = &cpuid_data.entries[cpuid_i++];
05330448 691
05330448 692 c->function = i;
a33609ca
AL
693 c->flags = 0;
694 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
05330448
AL
695 }
696
b3baa152
BW
697 /* Call Centaur's CPUID instructions they are supported. */
698 if (env->cpuid_xlevel2 > 0) {
b3baa152
BW
699 cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
700
701 for (i = 0xC0000000; i <= limit; i++) {
f8bb0565
IM
702 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
703 fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
704 abort();
705 }
b3baa152
BW
706 c = &cpuid_data.entries[cpuid_i++];
707
708 c->function = i;
709 c->flags = 0;
710 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
711 }
712 }
713
05330448
AL
714 cpuid_data.cpuid.nent = cpuid_i;
715
e7701825 716 if (((env->cpuid_version >> 8)&0xF) >= 6
0514ef2f 717 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
fc7a504c 718 (CPUID_MCE | CPUID_MCA)
a60f24b5 719 && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
e7701825
MT
720 uint64_t mcg_cap;
721 int banks;
32a42024 722 int ret;
e7701825 723
a60f24b5 724 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
75d49497
JK
725 if (ret < 0) {
726 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
727 return ret;
e7701825 728 }
75d49497
JK
729
730 if (banks > MCE_BANKS_DEF) {
731 banks = MCE_BANKS_DEF;
732 }
733 mcg_cap &= MCE_CAP_DEF;
734 mcg_cap |= banks;
1bc22652 735 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &mcg_cap);
75d49497
JK
736 if (ret < 0) {
737 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
738 return ret;
739 }
740
741 env->mcg_cap = mcg_cap;
e7701825 742 }
e7701825 743
b8cc45d6
GC
744 qemu_add_vm_change_state_handler(cpu_update_state, env);
745
df67696e
LJ
746 c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
747 if (c) {
748 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
749 !!(c->ecx & CPUID_EXT_SMX);
750 }
751
68bfd0ad
MT
752 c = cpuid_find_entry(&cpuid_data.cpuid, 0x80000007, 0);
753 if (c && (c->edx & 1<<8) && invtsc_mig_blocker == NULL) {
754 /* for migration */
755 error_setg(&invtsc_mig_blocker,
756 "State blocked by non-migratable CPU device"
757 " (invtsc flag)");
758 migrate_add_blocker(invtsc_mig_blocker);
759 /* for savevm */
760 vmstate_x86_cpu.unmigratable = 1;
761 }
762
7e680753 763 cpuid_data.cpuid.padding = 0;
1bc22652 764 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
fdc9c41a
JK
765 if (r) {
766 return r;
767 }
e7429073 768
a60f24b5 769 r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL);
e7429073 770 if (r && env->tsc_khz) {
1bc22652 771 r = kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz);
e7429073
JR
772 if (r < 0) {
773 fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
774 return r;
775 }
776 }
e7429073 777
28143b40 778 if (has_xsave) {
fabacc0f
JK
779 env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
780 }
781
d1ae67f6
AW
782 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
783 has_msr_mtrr = true;
784 }
785
e7429073 786 return 0;
05330448
AL
787}
788
50a2c6e5 789void kvm_arch_reset_vcpu(X86CPU *cpu)
caa5af0f 790{
20d695a9 791 CPUX86State *env = &cpu->env;
dd673288 792
e73223a5 793 env->exception_injected = -1;
0e607a80 794 env->interrupt_injected = -1;
1a5e9d2f 795 env->xcr0 = 1;
ddced198 796 if (kvm_irqchip_in_kernel()) {
dd673288 797 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
ddced198
MT
798 KVM_MP_STATE_UNINITIALIZED;
799 } else {
800 env->mp_state = KVM_MP_STATE_RUNNABLE;
801 }
caa5af0f
JK
802}
803
e0723c45
PB
804void kvm_arch_do_init_vcpu(X86CPU *cpu)
805{
806 CPUX86State *env = &cpu->env;
807
808 /* APs get directly into wait-for-SIPI state. */
809 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
810 env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
811 }
812}
813
c3a3a7d3 814static int kvm_get_supported_msrs(KVMState *s)
05330448 815{
75b10c43 816 static int kvm_supported_msrs;
c3a3a7d3 817 int ret = 0;
05330448
AL
818
819 /* first time */
75b10c43 820 if (kvm_supported_msrs == 0) {
05330448
AL
821 struct kvm_msr_list msr_list, *kvm_msr_list;
822
75b10c43 823 kvm_supported_msrs = -1;
05330448
AL
824
825 /* Obtain MSR list from KVM. These are the MSRs that we must
826 * save/restore */
4c9f7372 827 msr_list.nmsrs = 0;
c3a3a7d3 828 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
6fb6d245 829 if (ret < 0 && ret != -E2BIG) {
c3a3a7d3 830 return ret;
6fb6d245 831 }
d9db889f
JK
832 /* Old kernel modules had a bug and could write beyond the provided
833 memory. Allocate at least a safe amount of 1K. */
7267c094 834 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
d9db889f
JK
835 msr_list.nmsrs *
836 sizeof(msr_list.indices[0])));
05330448 837
55308450 838 kvm_msr_list->nmsrs = msr_list.nmsrs;
c3a3a7d3 839 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
05330448
AL
840 if (ret >= 0) {
841 int i;
842
843 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
844 if (kvm_msr_list->indices[i] == MSR_STAR) {
c3a3a7d3 845 has_msr_star = true;
75b10c43
MT
846 continue;
847 }
848 if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
c3a3a7d3 849 has_msr_hsave_pa = true;
75b10c43 850 continue;
05330448 851 }
c9b8f6b6
AS
852 if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
853 has_msr_tsc_aux = true;
854 continue;
855 }
f28558d3
WA
856 if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
857 has_msr_tsc_adjust = true;
858 continue;
859 }
aa82ba54
LJ
860 if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
861 has_msr_tsc_deadline = true;
862 continue;
863 }
fc12d72e
PB
864 if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
865 has_msr_smbase = true;
866 continue;
867 }
21e87c46
AK
868 if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
869 has_msr_misc_enable = true;
870 continue;
871 }
79e9ebeb
LJ
872 if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
873 has_msr_bndcfgs = true;
874 continue;
875 }
18cd2c17
WL
876 if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
877 has_msr_xss = true;
878 continue;
879 }
f2a53c9e
AS
880 if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
881 has_msr_hv_crash = true;
882 continue;
883 }
744b8a94
AS
884 if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
885 has_msr_hv_reset = true;
886 continue;
887 }
8c145d7c
AS
888 if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
889 has_msr_hv_vpindex = true;
890 continue;
891 }
46eb8f98
AS
892 if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
893 has_msr_hv_runtime = true;
894 continue;
895 }
05330448
AL
896 }
897 }
898
7267c094 899 g_free(kvm_msr_list);
05330448
AL
900 }
901
c3a3a7d3 902 return ret;
05330448
AL
903}
904
6410848b
PB
905static Notifier smram_machine_done;
906static KVMMemoryListener smram_listener;
907static AddressSpace smram_address_space;
908static MemoryRegion smram_as_root;
909static MemoryRegion smram_as_mem;
910
911static void register_smram_listener(Notifier *n, void *unused)
912{
913 MemoryRegion *smram =
914 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
915
916 /* Outer container... */
917 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
918 memory_region_set_enabled(&smram_as_root, true);
919
920 /* ... with two regions inside: normal system memory with low
921 * priority, and...
922 */
923 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
924 get_system_memory(), 0, ~0ull);
925 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
926 memory_region_set_enabled(&smram_as_mem, true);
927
928 if (smram) {
929 /* ... SMRAM with higher priority */
930 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
931 memory_region_set_enabled(smram, true);
932 }
933
934 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
935 kvm_memory_listener_register(kvm_state, &smram_listener,
936 &smram_address_space, 1);
937}
938
b16565b3 939int kvm_arch_init(MachineState *ms, KVMState *s)
20420430 940{
11076198 941 uint64_t identity_base = 0xfffbc000;
39d6960a 942 uint64_t shadow_mem;
20420430 943 int ret;
25d2e361 944 struct utsname utsname;
20420430 945
28143b40
TH
946#ifdef KVM_CAP_XSAVE
947 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
948#endif
949
950#ifdef KVM_CAP_XCRS
951 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
952#endif
953
954#ifdef KVM_CAP_PIT_STATE2
955 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
956#endif
957
c3a3a7d3 958 ret = kvm_get_supported_msrs(s);
20420430 959 if (ret < 0) {
20420430
SY
960 return ret;
961 }
25d2e361
MT
962
963 uname(&utsname);
964 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
965
4c5b10b7 966 /*
11076198
JK
967 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
968 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
969 * Since these must be part of guest physical memory, we need to allocate
970 * them, both by setting their start addresses in the kernel and by
971 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
972 *
973 * Older KVM versions may not support setting the identity map base. In
974 * that case we need to stick with the default, i.e. a 256K maximum BIOS
975 * size.
4c5b10b7 976 */
11076198
JK
977 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
978 /* Allows up to 16M BIOSes. */
979 identity_base = 0xfeffc000;
980
981 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
982 if (ret < 0) {
983 return ret;
984 }
4c5b10b7 985 }
e56ff191 986
11076198
JK
987 /* Set TSS base one page after EPT identity map. */
988 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
20420430
SY
989 if (ret < 0) {
990 return ret;
991 }
992
11076198
JK
993 /* Tell fw_cfg to notify the BIOS to reserve the range. */
994 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
20420430 995 if (ret < 0) {
11076198 996 fprintf(stderr, "e820_add_entry() table is full\n");
20420430
SY
997 return ret;
998 }
3c85e74f 999 qemu_register_reset(kvm_unpoison_all, NULL);
20420430 1000
4689b77b 1001 shadow_mem = machine_kvm_shadow_mem(ms);
36ad0e94
MA
1002 if (shadow_mem != -1) {
1003 shadow_mem /= 4096;
1004 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
1005 if (ret < 0) {
1006 return ret;
39d6960a
JK
1007 }
1008 }
6410848b
PB
1009
1010 if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
1011 smram_machine_done.notify = register_smram_listener;
1012 qemu_add_machine_init_done_notifier(&smram_machine_done);
1013 }
11076198 1014 return 0;
05330448 1015}
b9bec74b 1016
05330448
AL
1017static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
1018{
1019 lhs->selector = rhs->selector;
1020 lhs->base = rhs->base;
1021 lhs->limit = rhs->limit;
1022 lhs->type = 3;
1023 lhs->present = 1;
1024 lhs->dpl = 3;
1025 lhs->db = 0;
1026 lhs->s = 1;
1027 lhs->l = 0;
1028 lhs->g = 0;
1029 lhs->avl = 0;
1030 lhs->unusable = 0;
1031}
1032
1033static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
1034{
1035 unsigned flags = rhs->flags;
1036 lhs->selector = rhs->selector;
1037 lhs->base = rhs->base;
1038 lhs->limit = rhs->limit;
1039 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
1040 lhs->present = (flags & DESC_P_MASK) != 0;
acaa7550 1041 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
05330448
AL
1042 lhs->db = (flags >> DESC_B_SHIFT) & 1;
1043 lhs->s = (flags & DESC_S_MASK) != 0;
1044 lhs->l = (flags >> DESC_L_SHIFT) & 1;
1045 lhs->g = (flags & DESC_G_MASK) != 0;
1046 lhs->avl = (flags & DESC_AVL_MASK) != 0;
1047 lhs->unusable = 0;
7e680753 1048 lhs->padding = 0;
05330448
AL
1049}
1050
1051static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
1052{
1053 lhs->selector = rhs->selector;
1054 lhs->base = rhs->base;
1055 lhs->limit = rhs->limit;
b9bec74b
JK
1056 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
1057 (rhs->present * DESC_P_MASK) |
1058 (rhs->dpl << DESC_DPL_SHIFT) |
1059 (rhs->db << DESC_B_SHIFT) |
1060 (rhs->s * DESC_S_MASK) |
1061 (rhs->l << DESC_L_SHIFT) |
1062 (rhs->g * DESC_G_MASK) |
1063 (rhs->avl * DESC_AVL_MASK);
05330448
AL
1064}
1065
1066static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
1067{
b9bec74b 1068 if (set) {
05330448 1069 *kvm_reg = *qemu_reg;
b9bec74b 1070 } else {
05330448 1071 *qemu_reg = *kvm_reg;
b9bec74b 1072 }
05330448
AL
1073}
1074
1bc22652 1075static int kvm_getput_regs(X86CPU *cpu, int set)
05330448 1076{
1bc22652 1077 CPUX86State *env = &cpu->env;
05330448
AL
1078 struct kvm_regs regs;
1079 int ret = 0;
1080
1081 if (!set) {
1bc22652 1082 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
b9bec74b 1083 if (ret < 0) {
05330448 1084 return ret;
b9bec74b 1085 }
05330448
AL
1086 }
1087
1088 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
1089 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
1090 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
1091 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
1092 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
1093 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
1094 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
1095 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
1096#ifdef TARGET_X86_64
1097 kvm_getput_reg(&regs.r8, &env->regs[8], set);
1098 kvm_getput_reg(&regs.r9, &env->regs[9], set);
1099 kvm_getput_reg(&regs.r10, &env->regs[10], set);
1100 kvm_getput_reg(&regs.r11, &env->regs[11], set);
1101 kvm_getput_reg(&regs.r12, &env->regs[12], set);
1102 kvm_getput_reg(&regs.r13, &env->regs[13], set);
1103 kvm_getput_reg(&regs.r14, &env->regs[14], set);
1104 kvm_getput_reg(&regs.r15, &env->regs[15], set);
1105#endif
1106
1107 kvm_getput_reg(&regs.rflags, &env->eflags, set);
1108 kvm_getput_reg(&regs.rip, &env->eip, set);
1109
b9bec74b 1110 if (set) {
1bc22652 1111 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
b9bec74b 1112 }
05330448
AL
1113
1114 return ret;
1115}
1116
1bc22652 1117static int kvm_put_fpu(X86CPU *cpu)
05330448 1118{
1bc22652 1119 CPUX86State *env = &cpu->env;
05330448
AL
1120 struct kvm_fpu fpu;
1121 int i;
1122
1123 memset(&fpu, 0, sizeof fpu);
1124 fpu.fsw = env->fpus & ~(7 << 11);
1125 fpu.fsw |= (env->fpstt & 7) << 11;
1126 fpu.fcw = env->fpuc;
42cc8fa6
JK
1127 fpu.last_opcode = env->fpop;
1128 fpu.last_ip = env->fpip;
1129 fpu.last_dp = env->fpdp;
b9bec74b
JK
1130 for (i = 0; i < 8; ++i) {
1131 fpu.ftwx |= (!env->fptags[i]) << i;
1132 }
05330448 1133 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
bee81887
PB
1134 for (i = 0; i < CPU_NB_REGS; i++) {
1135 stq_p(&fpu.xmm[i][0], env->xmm_regs[i].XMM_Q(0));
1136 stq_p(&fpu.xmm[i][8], env->xmm_regs[i].XMM_Q(1));
1137 }
05330448
AL
1138 fpu.mxcsr = env->mxcsr;
1139
1bc22652 1140 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
05330448
AL
1141}
1142
6b42494b
JK
1143#define XSAVE_FCW_FSW 0
1144#define XSAVE_FTW_FOP 1
f1665b21
SY
1145#define XSAVE_CWD_RIP 2
1146#define XSAVE_CWD_RDP 4
1147#define XSAVE_MXCSR 6
1148#define XSAVE_ST_SPACE 8
1149#define XSAVE_XMM_SPACE 40
1150#define XSAVE_XSTATE_BV 128
1151#define XSAVE_YMMH_SPACE 144
79e9ebeb
LJ
1152#define XSAVE_BNDREGS 240
1153#define XSAVE_BNDCSR 256
9aecd6f8
CP
1154#define XSAVE_OPMASK 272
1155#define XSAVE_ZMM_Hi256 288
1156#define XSAVE_Hi16_ZMM 416
f1665b21 1157
1bc22652 1158static int kvm_put_xsave(X86CPU *cpu)
f1665b21 1159{
1bc22652 1160 CPUX86State *env = &cpu->env;
fabacc0f 1161 struct kvm_xsave* xsave = env->kvm_xsave_buf;
42cc8fa6 1162 uint16_t cwd, swd, twd;
b7711471 1163 uint8_t *xmm, *ymmh, *zmmh;
fabacc0f 1164 int i, r;
f1665b21 1165
28143b40 1166 if (!has_xsave) {
1bc22652 1167 return kvm_put_fpu(cpu);
b9bec74b 1168 }
f1665b21 1169
f1665b21 1170 memset(xsave, 0, sizeof(struct kvm_xsave));
6115c0a8 1171 twd = 0;
f1665b21
SY
1172 swd = env->fpus & ~(7 << 11);
1173 swd |= (env->fpstt & 7) << 11;
1174 cwd = env->fpuc;
b9bec74b 1175 for (i = 0; i < 8; ++i) {
f1665b21 1176 twd |= (!env->fptags[i]) << i;
b9bec74b 1177 }
6b42494b
JK
1178 xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
1179 xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
42cc8fa6
JK
1180 memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
1181 memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
f1665b21
SY
1182 memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
1183 sizeof env->fpregs);
f1665b21
SY
1184 xsave->region[XSAVE_MXCSR] = env->mxcsr;
1185 *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
79e9ebeb
LJ
1186 memcpy(&xsave->region[XSAVE_BNDREGS], env->bnd_regs,
1187 sizeof env->bnd_regs);
1188 memcpy(&xsave->region[XSAVE_BNDCSR], &env->bndcs_regs,
1189 sizeof(env->bndcs_regs));
9aecd6f8
CP
1190 memcpy(&xsave->region[XSAVE_OPMASK], env->opmask_regs,
1191 sizeof env->opmask_regs);
bee81887
PB
1192
1193 xmm = (uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
b7711471
PB
1194 ymmh = (uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
1195 zmmh = (uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
1196 for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
bee81887
PB
1197 stq_p(xmm, env->xmm_regs[i].XMM_Q(0));
1198 stq_p(xmm+8, env->xmm_regs[i].XMM_Q(1));
b7711471
PB
1199 stq_p(ymmh, env->xmm_regs[i].XMM_Q(2));
1200 stq_p(ymmh+8, env->xmm_regs[i].XMM_Q(3));
1201 stq_p(zmmh, env->xmm_regs[i].XMM_Q(4));
1202 stq_p(zmmh+8, env->xmm_regs[i].XMM_Q(5));
1203 stq_p(zmmh+16, env->xmm_regs[i].XMM_Q(6));
1204 stq_p(zmmh+24, env->xmm_regs[i].XMM_Q(7));
bee81887
PB
1205 }
1206
9aecd6f8 1207#ifdef TARGET_X86_64
b7711471
PB
1208 memcpy(&xsave->region[XSAVE_Hi16_ZMM], &env->xmm_regs[16],
1209 16 * sizeof env->xmm_regs[16]);
9aecd6f8 1210#endif
1bc22652 1211 r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
0f53994f 1212 return r;
f1665b21
SY
1213}
1214
1bc22652 1215static int kvm_put_xcrs(X86CPU *cpu)
f1665b21 1216{
1bc22652 1217 CPUX86State *env = &cpu->env;
bdfc8480 1218 struct kvm_xcrs xcrs = {};
f1665b21 1219
28143b40 1220 if (!has_xcrs) {
f1665b21 1221 return 0;
b9bec74b 1222 }
f1665b21
SY
1223
1224 xcrs.nr_xcrs = 1;
1225 xcrs.flags = 0;
1226 xcrs.xcrs[0].xcr = 0;
1227 xcrs.xcrs[0].value = env->xcr0;
1bc22652 1228 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
f1665b21
SY
1229}
1230
1bc22652 1231static int kvm_put_sregs(X86CPU *cpu)
05330448 1232{
1bc22652 1233 CPUX86State *env = &cpu->env;
05330448
AL
1234 struct kvm_sregs sregs;
1235
0e607a80
JK
1236 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
1237 if (env->interrupt_injected >= 0) {
1238 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
1239 (uint64_t)1 << (env->interrupt_injected % 64);
1240 }
05330448
AL
1241
1242 if ((env->eflags & VM_MASK)) {
b9bec74b
JK
1243 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
1244 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
1245 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
1246 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
1247 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
1248 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
05330448 1249 } else {
b9bec74b
JK
1250 set_seg(&sregs.cs, &env->segs[R_CS]);
1251 set_seg(&sregs.ds, &env->segs[R_DS]);
1252 set_seg(&sregs.es, &env->segs[R_ES]);
1253 set_seg(&sregs.fs, &env->segs[R_FS]);
1254 set_seg(&sregs.gs, &env->segs[R_GS]);
1255 set_seg(&sregs.ss, &env->segs[R_SS]);
05330448
AL
1256 }
1257
1258 set_seg(&sregs.tr, &env->tr);
1259 set_seg(&sregs.ldt, &env->ldt);
1260
1261 sregs.idt.limit = env->idt.limit;
1262 sregs.idt.base = env->idt.base;
7e680753 1263 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
05330448
AL
1264 sregs.gdt.limit = env->gdt.limit;
1265 sregs.gdt.base = env->gdt.base;
7e680753 1266 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
05330448
AL
1267
1268 sregs.cr0 = env->cr[0];
1269 sregs.cr2 = env->cr[2];
1270 sregs.cr3 = env->cr[3];
1271 sregs.cr4 = env->cr[4];
1272
02e51483
CF
1273 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
1274 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
05330448
AL
1275
1276 sregs.efer = env->efer;
1277
1bc22652 1278 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
05330448
AL
1279}
1280
1281static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
1282 uint32_t index, uint64_t value)
1283{
1284 entry->index = index;
c7fe4b12 1285 entry->reserved = 0;
05330448
AL
1286 entry->data = value;
1287}
1288
7477cd38
MT
1289static int kvm_put_tscdeadline_msr(X86CPU *cpu)
1290{
1291 CPUX86State *env = &cpu->env;
1292 struct {
1293 struct kvm_msrs info;
1294 struct kvm_msr_entry entries[1];
1295 } msr_data;
1296 struct kvm_msr_entry *msrs = msr_data.entries;
1297
1298 if (!has_msr_tsc_deadline) {
1299 return 0;
1300 }
1301
1302 kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
1303
c7fe4b12
CB
1304 msr_data.info = (struct kvm_msrs) {
1305 .nmsrs = 1,
1306 };
7477cd38
MT
1307
1308 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
1309}
1310
6bdf863d
JK
1311/*
1312 * Provide a separate write service for the feature control MSR in order to
1313 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
1314 * before writing any other state because forcibly leaving nested mode
1315 * invalidates the VCPU state.
1316 */
1317static int kvm_put_msr_feature_control(X86CPU *cpu)
1318{
1319 struct {
1320 struct kvm_msrs info;
1321 struct kvm_msr_entry entry;
1322 } msr_data;
1323
1324 kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
1325 cpu->env.msr_ia32_feature_control);
c7fe4b12
CB
1326
1327 msr_data.info = (struct kvm_msrs) {
1328 .nmsrs = 1,
1329 };
1330
6bdf863d
JK
1331 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
1332}
1333
1bc22652 1334static int kvm_put_msrs(X86CPU *cpu, int level)
05330448 1335{
1bc22652 1336 CPUX86State *env = &cpu->env;
05330448
AL
1337 struct {
1338 struct kvm_msrs info;
d1ae67f6 1339 struct kvm_msr_entry entries[150];
05330448
AL
1340 } msr_data;
1341 struct kvm_msr_entry *msrs = msr_data.entries;
0d894367 1342 int n = 0, i;
05330448
AL
1343
1344 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
1345 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
1346 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
0c03266a 1347 kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
c3a3a7d3 1348 if (has_msr_star) {
b9bec74b
JK
1349 kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
1350 }
c3a3a7d3 1351 if (has_msr_hsave_pa) {
75b10c43 1352 kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
b9bec74b 1353 }
c9b8f6b6
AS
1354 if (has_msr_tsc_aux) {
1355 kvm_msr_entry_set(&msrs[n++], MSR_TSC_AUX, env->tsc_aux);
1356 }
f28558d3
WA
1357 if (has_msr_tsc_adjust) {
1358 kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
1359 }
21e87c46
AK
1360 if (has_msr_misc_enable) {
1361 kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
1362 env->msr_ia32_misc_enable);
1363 }
fc12d72e
PB
1364 if (has_msr_smbase) {
1365 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SMBASE, env->smbase);
1366 }
439d19f2
PB
1367 if (has_msr_bndcfgs) {
1368 kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
1369 }
18cd2c17
WL
1370 if (has_msr_xss) {
1371 kvm_msr_entry_set(&msrs[n++], MSR_IA32_XSS, env->xss);
1372 }
05330448 1373#ifdef TARGET_X86_64
25d2e361
MT
1374 if (lm_capable_kernel) {
1375 kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
1376 kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
1377 kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
1378 kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
1379 }
05330448 1380#endif
ff5c186b 1381 /*
0d894367
PB
1382 * The following MSRs have side effects on the guest or are too heavy
1383 * for normal writeback. Limit them to reset or full state updates.
ff5c186b
JK
1384 */
1385 if (level >= KVM_PUT_RESET_STATE) {
0522604b 1386 kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
ea643051
JK
1387 kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
1388 env->system_time_msr);
1389 kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
c5999bfc
JK
1390 if (has_msr_async_pf_en) {
1391 kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
1392 env->async_pf_en_msr);
1393 }
bc9a839d
MT
1394 if (has_msr_pv_eoi_en) {
1395 kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
1396 env->pv_eoi_en_msr);
1397 }
917367aa
MT
1398 if (has_msr_kvm_steal_time) {
1399 kvm_msr_entry_set(&msrs[n++], MSR_KVM_STEAL_TIME,
1400 env->steal_time_msr);
1401 }
0d894367
PB
1402 if (has_msr_architectural_pmu) {
1403 /* Stop the counter. */
1404 kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
1405 kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL, 0);
1406
1407 /* Set the counter values. */
1408 for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
1409 kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR0 + i,
1410 env->msr_fixed_counters[i]);
1411 }
1412 for (i = 0; i < num_architectural_pmu_counters; i++) {
1413 kvm_msr_entry_set(&msrs[n++], MSR_P6_PERFCTR0 + i,
1414 env->msr_gp_counters[i]);
1415 kvm_msr_entry_set(&msrs[n++], MSR_P6_EVNTSEL0 + i,
1416 env->msr_gp_evtsel[i]);
1417 }
1418 kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_STATUS,
1419 env->msr_global_status);
1420 kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_OVF_CTRL,
1421 env->msr_global_ovf_ctrl);
1422
1423 /* Now start the PMU. */
1424 kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL,
1425 env->msr_fixed_ctr_ctrl);
1426 kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
1427 env->msr_global_ctrl);
1428 }
7bc3d711 1429 if (has_msr_hv_hypercall) {
1c90ef26
VR
1430 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID,
1431 env->msr_hv_guest_os_id);
1432 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL,
1433 env->msr_hv_hypercall);
eab70139 1434 }
7bc3d711 1435 if (has_msr_hv_vapic) {
5ef68987
VR
1436 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
1437 env->msr_hv_vapic);
eab70139 1438 }
48a5f3bc
VR
1439 if (has_msr_hv_tsc) {
1440 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
1441 env->msr_hv_tsc);
1442 }
f2a53c9e
AS
1443 if (has_msr_hv_crash) {
1444 int j;
1445
1446 for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
1447 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_P0 + j,
1448 env->msr_hv_crash_params[j]);
1449
1450 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_CTL,
1451 HV_X64_MSR_CRASH_CTL_NOTIFY);
1452 }
46eb8f98
AS
1453 if (has_msr_hv_runtime) {
1454 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
1455 env->msr_hv_runtime);
1456 }
d1ae67f6
AW
1457 if (has_msr_mtrr) {
1458 kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
1459 kvm_msr_entry_set(&msrs[n++],
1460 MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
1461 kvm_msr_entry_set(&msrs[n++],
1462 MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
1463 kvm_msr_entry_set(&msrs[n++],
1464 MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
1465 kvm_msr_entry_set(&msrs[n++],
1466 MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
1467 kvm_msr_entry_set(&msrs[n++],
1468 MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
1469 kvm_msr_entry_set(&msrs[n++],
1470 MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
1471 kvm_msr_entry_set(&msrs[n++],
1472 MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
1473 kvm_msr_entry_set(&msrs[n++],
1474 MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
1475 kvm_msr_entry_set(&msrs[n++],
1476 MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
1477 kvm_msr_entry_set(&msrs[n++],
1478 MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
1479 kvm_msr_entry_set(&msrs[n++],
1480 MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
1481 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
1482 kvm_msr_entry_set(&msrs[n++],
1483 MSR_MTRRphysBase(i), env->mtrr_var[i].base);
1484 kvm_msr_entry_set(&msrs[n++],
1485 MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
1486 }
1487 }
6bdf863d
JK
1488
1489 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
1490 * kvm_put_msr_feature_control. */
ea643051 1491 }
57780495 1492 if (env->mcg_cap) {
d8da8574 1493 int i;
b9bec74b 1494
c34d440a
JK
1495 kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
1496 kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
1497 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
1498 kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
57780495
MT
1499 }
1500 }
1a03675d 1501
c7fe4b12
CB
1502 msr_data.info = (struct kvm_msrs) {
1503 .nmsrs = n,
1504 };
05330448 1505
1bc22652 1506 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
05330448
AL
1507
1508}
1509
1510
1bc22652 1511static int kvm_get_fpu(X86CPU *cpu)
05330448 1512{
1bc22652 1513 CPUX86State *env = &cpu->env;
05330448
AL
1514 struct kvm_fpu fpu;
1515 int i, ret;
1516
1bc22652 1517 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
b9bec74b 1518 if (ret < 0) {
05330448 1519 return ret;
b9bec74b 1520 }
05330448
AL
1521
1522 env->fpstt = (fpu.fsw >> 11) & 7;
1523 env->fpus = fpu.fsw;
1524 env->fpuc = fpu.fcw;
42cc8fa6
JK
1525 env->fpop = fpu.last_opcode;
1526 env->fpip = fpu.last_ip;
1527 env->fpdp = fpu.last_dp;
b9bec74b
JK
1528 for (i = 0; i < 8; ++i) {
1529 env->fptags[i] = !((fpu.ftwx >> i) & 1);
1530 }
05330448 1531 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
bee81887
PB
1532 for (i = 0; i < CPU_NB_REGS; i++) {
1533 env->xmm_regs[i].XMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
1534 env->xmm_regs[i].XMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
1535 }
05330448
AL
1536 env->mxcsr = fpu.mxcsr;
1537
1538 return 0;
1539}
1540
1bc22652 1541static int kvm_get_xsave(X86CPU *cpu)
f1665b21 1542{
1bc22652 1543 CPUX86State *env = &cpu->env;
fabacc0f 1544 struct kvm_xsave* xsave = env->kvm_xsave_buf;
f1665b21 1545 int ret, i;
b7711471 1546 const uint8_t *xmm, *ymmh, *zmmh;
42cc8fa6 1547 uint16_t cwd, swd, twd;
f1665b21 1548
28143b40 1549 if (!has_xsave) {
1bc22652 1550 return kvm_get_fpu(cpu);
b9bec74b 1551 }
f1665b21 1552
1bc22652 1553 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
0f53994f 1554 if (ret < 0) {
f1665b21 1555 return ret;
0f53994f 1556 }
f1665b21 1557
6b42494b
JK
1558 cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
1559 swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
1560 twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
1561 env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
f1665b21
SY
1562 env->fpstt = (swd >> 11) & 7;
1563 env->fpus = swd;
1564 env->fpuc = cwd;
b9bec74b 1565 for (i = 0; i < 8; ++i) {
f1665b21 1566 env->fptags[i] = !((twd >> i) & 1);
b9bec74b 1567 }
42cc8fa6
JK
1568 memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
1569 memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
f1665b21
SY
1570 env->mxcsr = xsave->region[XSAVE_MXCSR];
1571 memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
1572 sizeof env->fpregs);
f1665b21 1573 env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
79e9ebeb
LJ
1574 memcpy(env->bnd_regs, &xsave->region[XSAVE_BNDREGS],
1575 sizeof env->bnd_regs);
1576 memcpy(&env->bndcs_regs, &xsave->region[XSAVE_BNDCSR],
1577 sizeof(env->bndcs_regs));
9aecd6f8
CP
1578 memcpy(env->opmask_regs, &xsave->region[XSAVE_OPMASK],
1579 sizeof env->opmask_regs);
bee81887
PB
1580
1581 xmm = (const uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
b7711471
PB
1582 ymmh = (const uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
1583 zmmh = (const uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
1584 for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
bee81887
PB
1585 env->xmm_regs[i].XMM_Q(0) = ldq_p(xmm);
1586 env->xmm_regs[i].XMM_Q(1) = ldq_p(xmm+8);
b7711471
PB
1587 env->xmm_regs[i].XMM_Q(2) = ldq_p(ymmh);
1588 env->xmm_regs[i].XMM_Q(3) = ldq_p(ymmh+8);
1589 env->xmm_regs[i].XMM_Q(4) = ldq_p(zmmh);
1590 env->xmm_regs[i].XMM_Q(5) = ldq_p(zmmh+8);
1591 env->xmm_regs[i].XMM_Q(6) = ldq_p(zmmh+16);
1592 env->xmm_regs[i].XMM_Q(7) = ldq_p(zmmh+24);
bee81887
PB
1593 }
1594
9aecd6f8 1595#ifdef TARGET_X86_64
b7711471
PB
1596 memcpy(&env->xmm_regs[16], &xsave->region[XSAVE_Hi16_ZMM],
1597 16 * sizeof env->xmm_regs[16]);
9aecd6f8 1598#endif
f1665b21 1599 return 0;
f1665b21
SY
1600}
1601
1bc22652 1602static int kvm_get_xcrs(X86CPU *cpu)
f1665b21 1603{
1bc22652 1604 CPUX86State *env = &cpu->env;
f1665b21
SY
1605 int i, ret;
1606 struct kvm_xcrs xcrs;
1607
28143b40 1608 if (!has_xcrs) {
f1665b21 1609 return 0;
b9bec74b 1610 }
f1665b21 1611
1bc22652 1612 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
b9bec74b 1613 if (ret < 0) {
f1665b21 1614 return ret;
b9bec74b 1615 }
f1665b21 1616
b9bec74b 1617 for (i = 0; i < xcrs.nr_xcrs; i++) {
f1665b21 1618 /* Only support xcr0 now */
0fd53fec
PB
1619 if (xcrs.xcrs[i].xcr == 0) {
1620 env->xcr0 = xcrs.xcrs[i].value;
f1665b21
SY
1621 break;
1622 }
b9bec74b 1623 }
f1665b21 1624 return 0;
f1665b21
SY
1625}
1626
1bc22652 1627static int kvm_get_sregs(X86CPU *cpu)
05330448 1628{
1bc22652 1629 CPUX86State *env = &cpu->env;
05330448
AL
1630 struct kvm_sregs sregs;
1631 uint32_t hflags;
0e607a80 1632 int bit, i, ret;
05330448 1633
1bc22652 1634 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
b9bec74b 1635 if (ret < 0) {
05330448 1636 return ret;
b9bec74b 1637 }
05330448 1638
0e607a80
JK
1639 /* There can only be one pending IRQ set in the bitmap at a time, so try
1640 to find it and save its number instead (-1 for none). */
1641 env->interrupt_injected = -1;
1642 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
1643 if (sregs.interrupt_bitmap[i]) {
1644 bit = ctz64(sregs.interrupt_bitmap[i]);
1645 env->interrupt_injected = i * 64 + bit;
1646 break;
1647 }
1648 }
05330448
AL
1649
1650 get_seg(&env->segs[R_CS], &sregs.cs);
1651 get_seg(&env->segs[R_DS], &sregs.ds);
1652 get_seg(&env->segs[R_ES], &sregs.es);
1653 get_seg(&env->segs[R_FS], &sregs.fs);
1654 get_seg(&env->segs[R_GS], &sregs.gs);
1655 get_seg(&env->segs[R_SS], &sregs.ss);
1656
1657 get_seg(&env->tr, &sregs.tr);
1658 get_seg(&env->ldt, &sregs.ldt);
1659
1660 env->idt.limit = sregs.idt.limit;
1661 env->idt.base = sregs.idt.base;
1662 env->gdt.limit = sregs.gdt.limit;
1663 env->gdt.base = sregs.gdt.base;
1664
1665 env->cr[0] = sregs.cr0;
1666 env->cr[2] = sregs.cr2;
1667 env->cr[3] = sregs.cr3;
1668 env->cr[4] = sregs.cr4;
1669
05330448 1670 env->efer = sregs.efer;
cce47516
JK
1671
1672 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
05330448 1673
b9bec74b
JK
1674#define HFLAG_COPY_MASK \
1675 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
1676 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
1677 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
1678 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
05330448 1679
7125c937 1680 hflags = (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
05330448
AL
1681 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
1682 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
b9bec74b 1683 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
05330448
AL
1684 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
1685 hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
b9bec74b 1686 (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
05330448
AL
1687
1688 if (env->efer & MSR_EFER_LMA) {
1689 hflags |= HF_LMA_MASK;
1690 }
1691
1692 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
1693 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
1694 } else {
1695 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
b9bec74b 1696 (DESC_B_SHIFT - HF_CS32_SHIFT);
05330448 1697 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
b9bec74b
JK
1698 (DESC_B_SHIFT - HF_SS32_SHIFT);
1699 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
1700 !(hflags & HF_CS32_MASK)) {
1701 hflags |= HF_ADDSEG_MASK;
1702 } else {
1703 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
1704 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
1705 }
05330448
AL
1706 }
1707 env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
05330448
AL
1708
1709 return 0;
1710}
1711
1bc22652 1712static int kvm_get_msrs(X86CPU *cpu)
05330448 1713{
1bc22652 1714 CPUX86State *env = &cpu->env;
05330448
AL
1715 struct {
1716 struct kvm_msrs info;
d1ae67f6 1717 struct kvm_msr_entry entries[150];
05330448
AL
1718 } msr_data;
1719 struct kvm_msr_entry *msrs = msr_data.entries;
1720 int ret, i, n;
1721
1722 n = 0;
1723 msrs[n++].index = MSR_IA32_SYSENTER_CS;
1724 msrs[n++].index = MSR_IA32_SYSENTER_ESP;
1725 msrs[n++].index = MSR_IA32_SYSENTER_EIP;
0c03266a 1726 msrs[n++].index = MSR_PAT;
c3a3a7d3 1727 if (has_msr_star) {
b9bec74b
JK
1728 msrs[n++].index = MSR_STAR;
1729 }
c3a3a7d3 1730 if (has_msr_hsave_pa) {
75b10c43 1731 msrs[n++].index = MSR_VM_HSAVE_PA;
b9bec74b 1732 }
c9b8f6b6
AS
1733 if (has_msr_tsc_aux) {
1734 msrs[n++].index = MSR_TSC_AUX;
1735 }
f28558d3
WA
1736 if (has_msr_tsc_adjust) {
1737 msrs[n++].index = MSR_TSC_ADJUST;
1738 }
aa82ba54
LJ
1739 if (has_msr_tsc_deadline) {
1740 msrs[n++].index = MSR_IA32_TSCDEADLINE;
1741 }
21e87c46
AK
1742 if (has_msr_misc_enable) {
1743 msrs[n++].index = MSR_IA32_MISC_ENABLE;
1744 }
fc12d72e
PB
1745 if (has_msr_smbase) {
1746 msrs[n++].index = MSR_IA32_SMBASE;
1747 }
df67696e
LJ
1748 if (has_msr_feature_control) {
1749 msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
1750 }
79e9ebeb
LJ
1751 if (has_msr_bndcfgs) {
1752 msrs[n++].index = MSR_IA32_BNDCFGS;
1753 }
18cd2c17
WL
1754 if (has_msr_xss) {
1755 msrs[n++].index = MSR_IA32_XSS;
1756 }
1757
b8cc45d6
GC
1758
1759 if (!env->tsc_valid) {
1760 msrs[n++].index = MSR_IA32_TSC;
1354869c 1761 env->tsc_valid = !runstate_is_running();
b8cc45d6
GC
1762 }
1763
05330448 1764#ifdef TARGET_X86_64
25d2e361
MT
1765 if (lm_capable_kernel) {
1766 msrs[n++].index = MSR_CSTAR;
1767 msrs[n++].index = MSR_KERNELGSBASE;
1768 msrs[n++].index = MSR_FMASK;
1769 msrs[n++].index = MSR_LSTAR;
1770 }
05330448 1771#endif
1a03675d
GC
1772 msrs[n++].index = MSR_KVM_SYSTEM_TIME;
1773 msrs[n++].index = MSR_KVM_WALL_CLOCK;
c5999bfc
JK
1774 if (has_msr_async_pf_en) {
1775 msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
1776 }
bc9a839d
MT
1777 if (has_msr_pv_eoi_en) {
1778 msrs[n++].index = MSR_KVM_PV_EOI_EN;
1779 }
917367aa
MT
1780 if (has_msr_kvm_steal_time) {
1781 msrs[n++].index = MSR_KVM_STEAL_TIME;
1782 }
0d894367
PB
1783 if (has_msr_architectural_pmu) {
1784 msrs[n++].index = MSR_CORE_PERF_FIXED_CTR_CTRL;
1785 msrs[n++].index = MSR_CORE_PERF_GLOBAL_CTRL;
1786 msrs[n++].index = MSR_CORE_PERF_GLOBAL_STATUS;
1787 msrs[n++].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
1788 for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
1789 msrs[n++].index = MSR_CORE_PERF_FIXED_CTR0 + i;
1790 }
1791 for (i = 0; i < num_architectural_pmu_counters; i++) {
1792 msrs[n++].index = MSR_P6_PERFCTR0 + i;
1793 msrs[n++].index = MSR_P6_EVNTSEL0 + i;
1794 }
1795 }
1a03675d 1796
57780495
MT
1797 if (env->mcg_cap) {
1798 msrs[n++].index = MSR_MCG_STATUS;
1799 msrs[n++].index = MSR_MCG_CTL;
b9bec74b 1800 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
57780495 1801 msrs[n++].index = MSR_MC0_CTL + i;
b9bec74b 1802 }
57780495 1803 }
57780495 1804
1c90ef26
VR
1805 if (has_msr_hv_hypercall) {
1806 msrs[n++].index = HV_X64_MSR_HYPERCALL;
1807 msrs[n++].index = HV_X64_MSR_GUEST_OS_ID;
1808 }
5ef68987
VR
1809 if (has_msr_hv_vapic) {
1810 msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
1811 }
48a5f3bc
VR
1812 if (has_msr_hv_tsc) {
1813 msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
1814 }
f2a53c9e
AS
1815 if (has_msr_hv_crash) {
1816 int j;
1817
1818 for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
1819 msrs[n++].index = HV_X64_MSR_CRASH_P0 + j;
1820 }
1821 }
46eb8f98
AS
1822 if (has_msr_hv_runtime) {
1823 msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
1824 }
d1ae67f6
AW
1825 if (has_msr_mtrr) {
1826 msrs[n++].index = MSR_MTRRdefType;
1827 msrs[n++].index = MSR_MTRRfix64K_00000;
1828 msrs[n++].index = MSR_MTRRfix16K_80000;
1829 msrs[n++].index = MSR_MTRRfix16K_A0000;
1830 msrs[n++].index = MSR_MTRRfix4K_C0000;
1831 msrs[n++].index = MSR_MTRRfix4K_C8000;
1832 msrs[n++].index = MSR_MTRRfix4K_D0000;
1833 msrs[n++].index = MSR_MTRRfix4K_D8000;
1834 msrs[n++].index = MSR_MTRRfix4K_E0000;
1835 msrs[n++].index = MSR_MTRRfix4K_E8000;
1836 msrs[n++].index = MSR_MTRRfix4K_F0000;
1837 msrs[n++].index = MSR_MTRRfix4K_F8000;
1838 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
1839 msrs[n++].index = MSR_MTRRphysBase(i);
1840 msrs[n++].index = MSR_MTRRphysMask(i);
1841 }
1842 }
5ef68987 1843
d19ae73e
CB
1844 msr_data.info = (struct kvm_msrs) {
1845 .nmsrs = n,
1846 };
1847
1bc22652 1848 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
b9bec74b 1849 if (ret < 0) {
05330448 1850 return ret;
b9bec74b 1851 }
05330448
AL
1852
1853 for (i = 0; i < ret; i++) {
0d894367
PB
1854 uint32_t index = msrs[i].index;
1855 switch (index) {
05330448
AL
1856 case MSR_IA32_SYSENTER_CS:
1857 env->sysenter_cs = msrs[i].data;
1858 break;
1859 case MSR_IA32_SYSENTER_ESP:
1860 env->sysenter_esp = msrs[i].data;
1861 break;
1862 case MSR_IA32_SYSENTER_EIP:
1863 env->sysenter_eip = msrs[i].data;
1864 break;
0c03266a
JK
1865 case MSR_PAT:
1866 env->pat = msrs[i].data;
1867 break;
05330448
AL
1868 case MSR_STAR:
1869 env->star = msrs[i].data;
1870 break;
1871#ifdef TARGET_X86_64
1872 case MSR_CSTAR:
1873 env->cstar = msrs[i].data;
1874 break;
1875 case MSR_KERNELGSBASE:
1876 env->kernelgsbase = msrs[i].data;
1877 break;
1878 case MSR_FMASK:
1879 env->fmask = msrs[i].data;
1880 break;
1881 case MSR_LSTAR:
1882 env->lstar = msrs[i].data;
1883 break;
1884#endif
1885 case MSR_IA32_TSC:
1886 env->tsc = msrs[i].data;
1887 break;
c9b8f6b6
AS
1888 case MSR_TSC_AUX:
1889 env->tsc_aux = msrs[i].data;
1890 break;
f28558d3
WA
1891 case MSR_TSC_ADJUST:
1892 env->tsc_adjust = msrs[i].data;
1893 break;
aa82ba54
LJ
1894 case MSR_IA32_TSCDEADLINE:
1895 env->tsc_deadline = msrs[i].data;
1896 break;
aa851e36
MT
1897 case MSR_VM_HSAVE_PA:
1898 env->vm_hsave = msrs[i].data;
1899 break;
1a03675d
GC
1900 case MSR_KVM_SYSTEM_TIME:
1901 env->system_time_msr = msrs[i].data;
1902 break;
1903 case MSR_KVM_WALL_CLOCK:
1904 env->wall_clock_msr = msrs[i].data;
1905 break;
57780495
MT
1906 case MSR_MCG_STATUS:
1907 env->mcg_status = msrs[i].data;
1908 break;
1909 case MSR_MCG_CTL:
1910 env->mcg_ctl = msrs[i].data;
1911 break;
21e87c46
AK
1912 case MSR_IA32_MISC_ENABLE:
1913 env->msr_ia32_misc_enable = msrs[i].data;
1914 break;
fc12d72e
PB
1915 case MSR_IA32_SMBASE:
1916 env->smbase = msrs[i].data;
1917 break;
0779caeb
ACL
1918 case MSR_IA32_FEATURE_CONTROL:
1919 env->msr_ia32_feature_control = msrs[i].data;
df67696e 1920 break;
79e9ebeb
LJ
1921 case MSR_IA32_BNDCFGS:
1922 env->msr_bndcfgs = msrs[i].data;
1923 break;
18cd2c17
WL
1924 case MSR_IA32_XSS:
1925 env->xss = msrs[i].data;
1926 break;
57780495 1927 default:
57780495
MT
1928 if (msrs[i].index >= MSR_MC0_CTL &&
1929 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
1930 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
57780495 1931 }
d8da8574 1932 break;
f6584ee2
GN
1933 case MSR_KVM_ASYNC_PF_EN:
1934 env->async_pf_en_msr = msrs[i].data;
1935 break;
bc9a839d
MT
1936 case MSR_KVM_PV_EOI_EN:
1937 env->pv_eoi_en_msr = msrs[i].data;
1938 break;
917367aa
MT
1939 case MSR_KVM_STEAL_TIME:
1940 env->steal_time_msr = msrs[i].data;
1941 break;
0d894367
PB
1942 case MSR_CORE_PERF_FIXED_CTR_CTRL:
1943 env->msr_fixed_ctr_ctrl = msrs[i].data;
1944 break;
1945 case MSR_CORE_PERF_GLOBAL_CTRL:
1946 env->msr_global_ctrl = msrs[i].data;
1947 break;
1948 case MSR_CORE_PERF_GLOBAL_STATUS:
1949 env->msr_global_status = msrs[i].data;
1950 break;
1951 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
1952 env->msr_global_ovf_ctrl = msrs[i].data;
1953 break;
1954 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
1955 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
1956 break;
1957 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
1958 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
1959 break;
1960 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
1961 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
1962 break;
1c90ef26
VR
1963 case HV_X64_MSR_HYPERCALL:
1964 env->msr_hv_hypercall = msrs[i].data;
1965 break;
1966 case HV_X64_MSR_GUEST_OS_ID:
1967 env->msr_hv_guest_os_id = msrs[i].data;
1968 break;
5ef68987
VR
1969 case HV_X64_MSR_APIC_ASSIST_PAGE:
1970 env->msr_hv_vapic = msrs[i].data;
1971 break;
48a5f3bc
VR
1972 case HV_X64_MSR_REFERENCE_TSC:
1973 env->msr_hv_tsc = msrs[i].data;
1974 break;
f2a53c9e
AS
1975 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1976 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
1977 break;
46eb8f98
AS
1978 case HV_X64_MSR_VP_RUNTIME:
1979 env->msr_hv_runtime = msrs[i].data;
1980 break;
d1ae67f6
AW
1981 case MSR_MTRRdefType:
1982 env->mtrr_deftype = msrs[i].data;
1983 break;
1984 case MSR_MTRRfix64K_00000:
1985 env->mtrr_fixed[0] = msrs[i].data;
1986 break;
1987 case MSR_MTRRfix16K_80000:
1988 env->mtrr_fixed[1] = msrs[i].data;
1989 break;
1990 case MSR_MTRRfix16K_A0000:
1991 env->mtrr_fixed[2] = msrs[i].data;
1992 break;
1993 case MSR_MTRRfix4K_C0000:
1994 env->mtrr_fixed[3] = msrs[i].data;
1995 break;
1996 case MSR_MTRRfix4K_C8000:
1997 env->mtrr_fixed[4] = msrs[i].data;
1998 break;
1999 case MSR_MTRRfix4K_D0000:
2000 env->mtrr_fixed[5] = msrs[i].data;
2001 break;
2002 case MSR_MTRRfix4K_D8000:
2003 env->mtrr_fixed[6] = msrs[i].data;
2004 break;
2005 case MSR_MTRRfix4K_E0000:
2006 env->mtrr_fixed[7] = msrs[i].data;
2007 break;
2008 case MSR_MTRRfix4K_E8000:
2009 env->mtrr_fixed[8] = msrs[i].data;
2010 break;
2011 case MSR_MTRRfix4K_F0000:
2012 env->mtrr_fixed[9] = msrs[i].data;
2013 break;
2014 case MSR_MTRRfix4K_F8000:
2015 env->mtrr_fixed[10] = msrs[i].data;
2016 break;
2017 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
2018 if (index & 1) {
2019 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data;
2020 } else {
2021 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
2022 }
2023 break;
05330448
AL
2024 }
2025 }
2026
2027 return 0;
2028}
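The batched layout used by kvm_get_msrs() above — a kvm_msrs header followed by kvm_msr_entry slots, with KVM_GET_MSRS returning how many entries were actually filled — can also be exercised with a raw ioctl. A minimal sketch, assuming vcpu_fd is a vCPU file descriptor obtained elsewhere via KVM_CREATE_VCPU:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Read a single MSR, e.g. MSR_IA32_SYSENTER_CS (0x174), from an existing vCPU. */
static int read_one_msr(int vcpu_fd, unsigned int index, unsigned long long *val)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    memset(&msr_data, 0, sizeof(msr_data));
    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = ioctl(vcpu_fd, KVM_GET_MSRS, &msr_data);
    if (ret < 1) {                      /* return value is the number of MSRs read */
        return -1;
    }
    *val = msr_data.entries[0].data;
    return 0;
}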
2029
1bc22652 2030static int kvm_put_mp_state(X86CPU *cpu)
9bdbe550 2031{
1bc22652 2032 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
9bdbe550 2033
1bc22652 2034 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
9bdbe550
HB
2035}
2036
23d02d9b 2037static int kvm_get_mp_state(X86CPU *cpu)
9bdbe550 2038{
259186a7 2039 CPUState *cs = CPU(cpu);
23d02d9b 2040 CPUX86State *env = &cpu->env;
9bdbe550
HB
2041 struct kvm_mp_state mp_state;
2042 int ret;
2043
259186a7 2044 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
9bdbe550
HB
2045 if (ret < 0) {
2046 return ret;
2047 }
2048 env->mp_state = mp_state.mp_state;
c14750e8 2049 if (kvm_irqchip_in_kernel()) {
259186a7 2050 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
c14750e8 2051 }
9bdbe550
HB
2052 return 0;
2053}
2054
1bc22652 2055static int kvm_get_apic(X86CPU *cpu)
680c1c6f 2056{
02e51483 2057 DeviceState *apic = cpu->apic_state;
680c1c6f
JK
2058 struct kvm_lapic_state kapic;
2059 int ret;
2060
3d4b2649 2061 if (apic && kvm_irqchip_in_kernel()) {
1bc22652 2062 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
680c1c6f
JK
2063 if (ret < 0) {
2064 return ret;
2065 }
2066
2067 kvm_get_apic_state(apic, &kapic);
2068 }
2069 return 0;
2070}
2071
1bc22652 2072static int kvm_put_apic(X86CPU *cpu)
680c1c6f 2073{
02e51483 2074 DeviceState *apic = cpu->apic_state;
680c1c6f
JK
2075 struct kvm_lapic_state kapic;
2076
3d4b2649 2077 if (apic && kvm_irqchip_in_kernel()) {
680c1c6f
JK
2078 kvm_put_apic_state(apic, &kapic);
2079
1bc22652 2080 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
680c1c6f
JK
2081 }
2082 return 0;
2083}
2084
1bc22652 2085static int kvm_put_vcpu_events(X86CPU *cpu, int level)
a0fb002c 2086{
fc12d72e 2087 CPUState *cs = CPU(cpu);
1bc22652 2088 CPUX86State *env = &cpu->env;
076796f8 2089 struct kvm_vcpu_events events = {};
a0fb002c
JK
2090
2091 if (!kvm_has_vcpu_events()) {
2092 return 0;
2093 }
2094
31827373
JK
2095 events.exception.injected = (env->exception_injected >= 0);
2096 events.exception.nr = env->exception_injected;
a0fb002c
JK
2097 events.exception.has_error_code = env->has_error_code;
2098 events.exception.error_code = env->error_code;
7e680753 2099 events.exception.pad = 0;
a0fb002c
JK
2100
2101 events.interrupt.injected = (env->interrupt_injected >= 0);
2102 events.interrupt.nr = env->interrupt_injected;
2103 events.interrupt.soft = env->soft_interrupt;
2104
2105 events.nmi.injected = env->nmi_injected;
2106 events.nmi.pending = env->nmi_pending;
2107 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
7e680753 2108 events.nmi.pad = 0;
a0fb002c
JK
2109
 2110    events.sipi_vector = env->sipi_vector;
 2111    events.flags = 0;
fc12d72e
PB
2112 if (has_msr_smbase) {
2113 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
2114 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
2115 if (kvm_irqchip_in_kernel()) {
2116 /* As soon as these are moved to the kernel, remove them
2117 * from cs->interrupt_request.
2118 */
2119 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
2120 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
2121 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
2122 } else {
2123 /* Keep these in cs->interrupt_request. */
2124 events.smi.pending = 0;
2125 events.smi.latched_init = 0;
2126 }
2127 events.flags |= KVM_VCPUEVENT_VALID_SMM;
2128 }
2129
ea643051
JK
 2130    /* events.flags was cleared before the SMM block above; resetting it
             here would drop KVM_VCPUEVENT_VALID_SMM. */
2131 if (level >= KVM_PUT_RESET_STATE) {
2132 events.flags |=
2133 KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
2134 }
aee028b9 2135
1bc22652 2136 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
a0fb002c
JK
2137}
2138
1bc22652 2139static int kvm_get_vcpu_events(X86CPU *cpu)
a0fb002c 2140{
1bc22652 2141 CPUX86State *env = &cpu->env;
a0fb002c
JK
2142 struct kvm_vcpu_events events;
2143 int ret;
2144
2145 if (!kvm_has_vcpu_events()) {
2146 return 0;
2147 }
2148
fc12d72e 2149 memset(&events, 0, sizeof(events));
1bc22652 2150 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
a0fb002c
JK
2151 if (ret < 0) {
2152 return ret;
2153 }
31827373 2154 env->exception_injected =
a0fb002c
JK
2155 events.exception.injected ? events.exception.nr : -1;
2156 env->has_error_code = events.exception.has_error_code;
2157 env->error_code = events.exception.error_code;
2158
2159 env->interrupt_injected =
2160 events.interrupt.injected ? events.interrupt.nr : -1;
2161 env->soft_interrupt = events.interrupt.soft;
2162
2163 env->nmi_injected = events.nmi.injected;
2164 env->nmi_pending = events.nmi.pending;
2165 if (events.nmi.masked) {
2166 env->hflags2 |= HF2_NMI_MASK;
2167 } else {
2168 env->hflags2 &= ~HF2_NMI_MASK;
2169 }
2170
fc12d72e
PB
2171 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
2172 if (events.smi.smm) {
2173 env->hflags |= HF_SMM_MASK;
2174 } else {
2175 env->hflags &= ~HF_SMM_MASK;
2176 }
2177 if (events.smi.pending) {
2178 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2179 } else {
2180 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2181 }
2182 if (events.smi.smm_inside_nmi) {
2183 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
2184 } else {
2185 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
2186 }
2187 if (events.smi.latched_init) {
2188 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2189 } else {
2190 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2191 }
2192 }
2193
a0fb002c 2194 env->sipi_vector = events.sipi_vector;
a0fb002c
JK
2195
2196 return 0;
2197}
2198
1bc22652 2199static int kvm_guest_debug_workarounds(X86CPU *cpu)
b0b1d690 2200{
ed2803da 2201 CPUState *cs = CPU(cpu);
1bc22652 2202 CPUX86State *env = &cpu->env;
b0b1d690 2203 int ret = 0;
b0b1d690
JK
2204 unsigned long reinject_trap = 0;
2205
2206 if (!kvm_has_vcpu_events()) {
2207 if (env->exception_injected == 1) {
2208 reinject_trap = KVM_GUESTDBG_INJECT_DB;
2209 } else if (env->exception_injected == 3) {
2210 reinject_trap = KVM_GUESTDBG_INJECT_BP;
2211 }
2212 env->exception_injected = -1;
2213 }
2214
2215 /*
2216 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
2217 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
2218 * by updating the debug state once again if single-stepping is on.
2219 * Another reason to call kvm_update_guest_debug here is a pending debug
 2220     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
2221 * reinject them via SET_GUEST_DEBUG.
2222 */
2223 if (reinject_trap ||
ed2803da 2224 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
38e478ec 2225 ret = kvm_update_guest_debug(cs, reinject_trap);
b0b1d690 2226 }
b0b1d690
JK
2227 return ret;
2228}
2229
1bc22652 2230static int kvm_put_debugregs(X86CPU *cpu)
ff44f1a3 2231{
1bc22652 2232 CPUX86State *env = &cpu->env;
ff44f1a3
JK
2233 struct kvm_debugregs dbgregs;
2234 int i;
2235
2236 if (!kvm_has_debugregs()) {
2237 return 0;
2238 }
2239
2240 for (i = 0; i < 4; i++) {
2241 dbgregs.db[i] = env->dr[i];
2242 }
2243 dbgregs.dr6 = env->dr[6];
2244 dbgregs.dr7 = env->dr[7];
2245 dbgregs.flags = 0;
2246
1bc22652 2247 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
ff44f1a3
JK
2248}
2249
1bc22652 2250static int kvm_get_debugregs(X86CPU *cpu)
ff44f1a3 2251{
1bc22652 2252 CPUX86State *env = &cpu->env;
ff44f1a3
JK
2253 struct kvm_debugregs dbgregs;
2254 int i, ret;
2255
2256 if (!kvm_has_debugregs()) {
2257 return 0;
2258 }
2259
1bc22652 2260 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
ff44f1a3 2261 if (ret < 0) {
b9bec74b 2262 return ret;
ff44f1a3
JK
2263 }
2264 for (i = 0; i < 4; i++) {
2265 env->dr[i] = dbgregs.db[i];
2266 }
2267 env->dr[4] = env->dr[6] = dbgregs.dr6;
2268 env->dr[5] = env->dr[7] = dbgregs.dr7;
ff44f1a3
JK
2269
2270 return 0;
2271}
2272
20d695a9 2273int kvm_arch_put_registers(CPUState *cpu, int level)
05330448 2274{
20d695a9 2275 X86CPU *x86_cpu = X86_CPU(cpu);
05330448
AL
2276 int ret;
2277
2fa45344 2278 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
dbaa07c4 2279
6bdf863d
JK
2280 if (level >= KVM_PUT_RESET_STATE && has_msr_feature_control) {
2281 ret = kvm_put_msr_feature_control(x86_cpu);
2282 if (ret < 0) {
2283 return ret;
2284 }
2285 }
2286
1bc22652 2287 ret = kvm_getput_regs(x86_cpu, 1);
b9bec74b 2288 if (ret < 0) {
05330448 2289 return ret;
b9bec74b 2290 }
1bc22652 2291 ret = kvm_put_xsave(x86_cpu);
b9bec74b 2292 if (ret < 0) {
f1665b21 2293 return ret;
b9bec74b 2294 }
1bc22652 2295 ret = kvm_put_xcrs(x86_cpu);
b9bec74b 2296 if (ret < 0) {
05330448 2297 return ret;
b9bec74b 2298 }
1bc22652 2299 ret = kvm_put_sregs(x86_cpu);
b9bec74b 2300 if (ret < 0) {
05330448 2301 return ret;
b9bec74b 2302 }
ab443475 2303 /* must be before kvm_put_msrs */
1bc22652 2304 ret = kvm_inject_mce_oldstyle(x86_cpu);
ab443475
JK
2305 if (ret < 0) {
2306 return ret;
2307 }
1bc22652 2308 ret = kvm_put_msrs(x86_cpu, level);
b9bec74b 2309 if (ret < 0) {
05330448 2310 return ret;
b9bec74b 2311 }
ea643051 2312 if (level >= KVM_PUT_RESET_STATE) {
1bc22652 2313 ret = kvm_put_mp_state(x86_cpu);
b9bec74b 2314 if (ret < 0) {
ea643051 2315 return ret;
b9bec74b 2316 }
1bc22652 2317 ret = kvm_put_apic(x86_cpu);
680c1c6f
JK
2318 if (ret < 0) {
2319 return ret;
2320 }
ea643051 2321 }
7477cd38
MT
2322
2323 ret = kvm_put_tscdeadline_msr(x86_cpu);
2324 if (ret < 0) {
2325 return ret;
2326 }
2327
1bc22652 2328 ret = kvm_put_vcpu_events(x86_cpu, level);
b9bec74b 2329 if (ret < 0) {
a0fb002c 2330 return ret;
b9bec74b 2331 }
1bc22652 2332 ret = kvm_put_debugregs(x86_cpu);
b9bec74b 2333 if (ret < 0) {
b0b1d690 2334 return ret;
b9bec74b 2335 }
b0b1d690 2336 /* must be last */
1bc22652 2337 ret = kvm_guest_debug_workarounds(x86_cpu);
b9bec74b 2338 if (ret < 0) {
ff44f1a3 2339 return ret;
b9bec74b 2340 }
05330448
AL
2341 return 0;
2342}
2343
20d695a9 2344int kvm_arch_get_registers(CPUState *cs)
05330448 2345{
20d695a9 2346 X86CPU *cpu = X86_CPU(cs);
05330448
AL
2347 int ret;
2348
20d695a9 2349 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
dbaa07c4 2350
1bc22652 2351 ret = kvm_getput_regs(cpu, 0);
b9bec74b 2352 if (ret < 0) {
05330448 2353 return ret;
b9bec74b 2354 }
1bc22652 2355 ret = kvm_get_xsave(cpu);
b9bec74b 2356 if (ret < 0) {
f1665b21 2357 return ret;
b9bec74b 2358 }
1bc22652 2359 ret = kvm_get_xcrs(cpu);
b9bec74b 2360 if (ret < 0) {
05330448 2361 return ret;
b9bec74b 2362 }
1bc22652 2363 ret = kvm_get_sregs(cpu);
b9bec74b 2364 if (ret < 0) {
05330448 2365 return ret;
b9bec74b 2366 }
1bc22652 2367 ret = kvm_get_msrs(cpu);
b9bec74b 2368 if (ret < 0) {
05330448 2369 return ret;
b9bec74b 2370 }
23d02d9b 2371 ret = kvm_get_mp_state(cpu);
b9bec74b 2372 if (ret < 0) {
5a2e3c2e 2373 return ret;
b9bec74b 2374 }
1bc22652 2375 ret = kvm_get_apic(cpu);
680c1c6f
JK
2376 if (ret < 0) {
2377 return ret;
2378 }
1bc22652 2379 ret = kvm_get_vcpu_events(cpu);
b9bec74b 2380 if (ret < 0) {
a0fb002c 2381 return ret;
b9bec74b 2382 }
1bc22652 2383 ret = kvm_get_debugregs(cpu);
b9bec74b 2384 if (ret < 0) {
ff44f1a3 2385 return ret;
b9bec74b 2386 }
05330448
AL
2387 return 0;
2388}
2389
20d695a9 2390void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
05330448 2391{
20d695a9
AF
2392 X86CPU *x86_cpu = X86_CPU(cpu);
2393 CPUX86State *env = &x86_cpu->env;
ce377af3
JK
2394 int ret;
2395
276ce815 2396 /* Inject NMI */
fc12d72e
PB
2397 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
2398 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
2399 qemu_mutex_lock_iothread();
2400 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
2401 qemu_mutex_unlock_iothread();
2402 DPRINTF("injected NMI\n");
2403 ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
2404 if (ret < 0) {
2405 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
2406 strerror(-ret));
2407 }
2408 }
2409 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
2410 qemu_mutex_lock_iothread();
2411 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
2412 qemu_mutex_unlock_iothread();
2413 DPRINTF("injected SMI\n");
2414 ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
2415 if (ret < 0) {
2416 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
2417 strerror(-ret));
2418 }
ce377af3 2419 }
276ce815
LJ
2420 }
2421
4b8523ee
JK
2422 if (!kvm_irqchip_in_kernel()) {
2423 qemu_mutex_lock_iothread();
2424 }
2425
e0723c45
PB
2426 /* Force the VCPU out of its inner loop to process any INIT requests
2427 * or (for userspace APIC, but it is cheap to combine the checks here)
2428 * pending TPR access reports.
2429 */
2430 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
fc12d72e
PB
2431 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
2432 !(env->hflags & HF_SMM_MASK)) {
2433 cpu->exit_request = 1;
2434 }
2435 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
2436 cpu->exit_request = 1;
2437 }
e0723c45 2438 }
05330448 2439
e0723c45 2440 if (!kvm_irqchip_in_kernel()) {
db1669bc
JK
2441 /* Try to inject an interrupt if the guest can accept it */
2442 if (run->ready_for_interrupt_injection &&
259186a7 2443 (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
db1669bc
JK
2444 (env->eflags & IF_MASK)) {
2445 int irq;
2446
259186a7 2447 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
db1669bc
JK
2448 irq = cpu_get_pic_interrupt(env);
2449 if (irq >= 0) {
2450 struct kvm_interrupt intr;
2451
2452 intr.irq = irq;
db1669bc 2453 DPRINTF("injected interrupt %d\n", irq);
1bc22652 2454 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
ce377af3
JK
2455 if (ret < 0) {
2456 fprintf(stderr,
2457 "KVM: injection failed, interrupt lost (%s)\n",
2458 strerror(-ret));
2459 }
db1669bc
JK
2460 }
2461 }
05330448 2462
db1669bc
JK
2463 /* If we have an interrupt but the guest is not ready to receive an
2464 * interrupt, request an interrupt window exit. This will
2465 * cause a return to userspace as soon as the guest is ready to
2466 * receive interrupts. */
259186a7 2467 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
db1669bc
JK
2468 run->request_interrupt_window = 1;
2469 } else {
2470 run->request_interrupt_window = 0;
2471 }
2472
2473 DPRINTF("setting tpr\n");
02e51483 2474 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
4b8523ee
JK
2475
2476 qemu_mutex_unlock_iothread();
db1669bc 2477 }
05330448
AL
2478}
2479
4c663752 2480MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
05330448 2481{
20d695a9
AF
2482 X86CPU *x86_cpu = X86_CPU(cpu);
2483 CPUX86State *env = &x86_cpu->env;
2484
fc12d72e
PB
2485 if (run->flags & KVM_RUN_X86_SMM) {
2486 env->hflags |= HF_SMM_MASK;
2487 } else {
 2488        env->hflags &= ~HF_SMM_MASK;
2489 }
b9bec74b 2490 if (run->if_flag) {
05330448 2491 env->eflags |= IF_MASK;
b9bec74b 2492 } else {
05330448 2493 env->eflags &= ~IF_MASK;
b9bec74b 2494 }
4b8523ee
JK
2495
2496 /* We need to protect the apic state against concurrent accesses from
2497 * different threads in case the userspace irqchip is used. */
2498 if (!kvm_irqchip_in_kernel()) {
2499 qemu_mutex_lock_iothread();
2500 }
02e51483
CF
2501 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
2502 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
4b8523ee
JK
2503 if (!kvm_irqchip_in_kernel()) {
2504 qemu_mutex_unlock_iothread();
2505 }
f794aa4a 2506 return cpu_get_mem_attrs(env);
05330448
AL
2507}
2508
20d695a9 2509int kvm_arch_process_async_events(CPUState *cs)
0af691d7 2510{
20d695a9
AF
2511 X86CPU *cpu = X86_CPU(cs);
2512 CPUX86State *env = &cpu->env;
232fc23b 2513
259186a7 2514 if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
ab443475
JK
2515 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
2516 assert(env->mcg_cap);
2517
259186a7 2518 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
ab443475 2519
dd1750d7 2520 kvm_cpu_synchronize_state(cs);
ab443475
JK
2521
2522 if (env->exception_injected == EXCP08_DBLE) {
2523 /* this means triple fault */
2524 qemu_system_reset_request();
fcd7d003 2525 cs->exit_request = 1;
ab443475
JK
2526 return 0;
2527 }
2528 env->exception_injected = EXCP12_MCHK;
2529 env->has_error_code = 0;
2530
259186a7 2531 cs->halted = 0;
ab443475
JK
2532 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
2533 env->mp_state = KVM_MP_STATE_RUNNABLE;
2534 }
2535 }
2536
fc12d72e
PB
2537 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
2538 !(env->hflags & HF_SMM_MASK)) {
e0723c45
PB
2539 kvm_cpu_synchronize_state(cs);
2540 do_cpu_init(cpu);
2541 }
2542
db1669bc
JK
2543 if (kvm_irqchip_in_kernel()) {
2544 return 0;
2545 }
2546
259186a7
AF
2547 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
2548 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
02e51483 2549 apic_poll_irq(cpu->apic_state);
5d62c43a 2550 }
259186a7 2551 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4601f7b0 2552 (env->eflags & IF_MASK)) ||
259186a7
AF
2553 (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
2554 cs->halted = 0;
6792a57b 2555 }
259186a7 2556 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
dd1750d7 2557 kvm_cpu_synchronize_state(cs);
232fc23b 2558 do_cpu_sipi(cpu);
0af691d7 2559 }
259186a7
AF
2560 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
2561 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
dd1750d7 2562 kvm_cpu_synchronize_state(cs);
02e51483 2563 apic_handle_tpr_access_report(cpu->apic_state, env->eip,
d362e757
JK
2564 env->tpr_access_type);
2565 }
0af691d7 2566
259186a7 2567 return cs->halted;
0af691d7
MT
2568}
2569
839b5630 2570static int kvm_handle_halt(X86CPU *cpu)
05330448 2571{
259186a7 2572 CPUState *cs = CPU(cpu);
839b5630
AF
2573 CPUX86State *env = &cpu->env;
2574
259186a7 2575 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
05330448 2576 (env->eflags & IF_MASK)) &&
259186a7
AF
2577 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
2578 cs->halted = 1;
bb4ea393 2579 return EXCP_HLT;
05330448
AL
2580 }
2581
bb4ea393 2582 return 0;
05330448
AL
2583}
2584
f7575c96 2585static int kvm_handle_tpr_access(X86CPU *cpu)
d362e757 2586{
f7575c96
AF
2587 CPUState *cs = CPU(cpu);
2588 struct kvm_run *run = cs->kvm_run;
d362e757 2589
02e51483 2590 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
d362e757
JK
2591 run->tpr_access.is_write ? TPR_ACCESS_WRITE
2592 : TPR_ACCESS_READ);
2593 return 1;
2594}
2595
f17ec444 2596int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
e22a25c9 2597{
38972938 2598 static const uint8_t int3 = 0xcc;
64bf3f4e 2599
f17ec444
AF
2600 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
2601 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
e22a25c9 2602 return -EINVAL;
b9bec74b 2603 }
e22a25c9
AL
2604 return 0;
2605}
2606
f17ec444 2607int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
e22a25c9
AL
2608{
2609 uint8_t int3;
2610
f17ec444
AF
2611 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
2612 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
e22a25c9 2613 return -EINVAL;
b9bec74b 2614 }
e22a25c9
AL
2615 return 0;
2616}
2617
2618static struct {
2619 target_ulong addr;
2620 int len;
2621 int type;
2622} hw_breakpoint[4];
2623
2624static int nb_hw_breakpoint;
2625
2626static int find_hw_breakpoint(target_ulong addr, int len, int type)
2627{
2628 int n;
2629
b9bec74b 2630 for (n = 0; n < nb_hw_breakpoint; n++) {
e22a25c9 2631 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
b9bec74b 2632 (hw_breakpoint[n].len == len || len == -1)) {
e22a25c9 2633 return n;
b9bec74b
JK
2634 }
2635 }
e22a25c9
AL
2636 return -1;
2637}
2638
2639int kvm_arch_insert_hw_breakpoint(target_ulong addr,
2640 target_ulong len, int type)
2641{
2642 switch (type) {
2643 case GDB_BREAKPOINT_HW:
2644 len = 1;
2645 break;
2646 case GDB_WATCHPOINT_WRITE:
2647 case GDB_WATCHPOINT_ACCESS:
2648 switch (len) {
2649 case 1:
2650 break;
2651 case 2:
2652 case 4:
2653 case 8:
b9bec74b 2654 if (addr & (len - 1)) {
e22a25c9 2655 return -EINVAL;
b9bec74b 2656 }
e22a25c9
AL
2657 break;
2658 default:
2659 return -EINVAL;
2660 }
2661 break;
2662 default:
2663 return -ENOSYS;
2664 }
2665
b9bec74b 2666 if (nb_hw_breakpoint == 4) {
e22a25c9 2667 return -ENOBUFS;
b9bec74b
JK
2668 }
2669 if (find_hw_breakpoint(addr, len, type) >= 0) {
e22a25c9 2670 return -EEXIST;
b9bec74b 2671 }
e22a25c9
AL
2672 hw_breakpoint[nb_hw_breakpoint].addr = addr;
2673 hw_breakpoint[nb_hw_breakpoint].len = len;
2674 hw_breakpoint[nb_hw_breakpoint].type = type;
2675 nb_hw_breakpoint++;
2676
2677 return 0;
2678}
2679
2680int kvm_arch_remove_hw_breakpoint(target_ulong addr,
2681 target_ulong len, int type)
2682{
2683 int n;
2684
2685 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
b9bec74b 2686 if (n < 0) {
e22a25c9 2687 return -ENOENT;
b9bec74b 2688 }
e22a25c9
AL
2689 nb_hw_breakpoint--;
2690 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
2691
2692 return 0;
2693}
2694
2695void kvm_arch_remove_all_hw_breakpoints(void)
2696{
2697 nb_hw_breakpoint = 0;
2698}
2699
2700static CPUWatchpoint hw_watchpoint;
2701
a60f24b5 2702static int kvm_handle_debug(X86CPU *cpu,
48405526 2703 struct kvm_debug_exit_arch *arch_info)
e22a25c9 2704{
ed2803da 2705 CPUState *cs = CPU(cpu);
a60f24b5 2706 CPUX86State *env = &cpu->env;
f2574737 2707 int ret = 0;
e22a25c9
AL
2708 int n;
2709
2710 if (arch_info->exception == 1) {
2711 if (arch_info->dr6 & (1 << 14)) {
ed2803da 2712 if (cs->singlestep_enabled) {
f2574737 2713 ret = EXCP_DEBUG;
b9bec74b 2714 }
e22a25c9 2715 } else {
b9bec74b
JK
2716 for (n = 0; n < 4; n++) {
2717 if (arch_info->dr6 & (1 << n)) {
e22a25c9
AL
2718 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
2719 case 0x0:
f2574737 2720 ret = EXCP_DEBUG;
e22a25c9
AL
2721 break;
2722 case 0x1:
f2574737 2723 ret = EXCP_DEBUG;
ff4700b0 2724 cs->watchpoint_hit = &hw_watchpoint;
e22a25c9
AL
2725 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
2726 hw_watchpoint.flags = BP_MEM_WRITE;
2727 break;
2728 case 0x3:
f2574737 2729 ret = EXCP_DEBUG;
ff4700b0 2730 cs->watchpoint_hit = &hw_watchpoint;
e22a25c9
AL
2731 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
2732 hw_watchpoint.flags = BP_MEM_ACCESS;
2733 break;
2734 }
b9bec74b
JK
2735 }
2736 }
e22a25c9 2737 }
ff4700b0 2738 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
f2574737 2739 ret = EXCP_DEBUG;
b9bec74b 2740 }
f2574737 2741 if (ret == 0) {
ff4700b0 2742 cpu_synchronize_state(cs);
48405526 2743 assert(env->exception_injected == -1);
b0b1d690 2744
f2574737 2745 /* pass to guest */
48405526
BS
2746 env->exception_injected = arch_info->exception;
2747 env->has_error_code = 0;
b0b1d690 2748 }
e22a25c9 2749
f2574737 2750 return ret;
e22a25c9
AL
2751}
2752
20d695a9 2753void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
e22a25c9
AL
2754{
2755 const uint8_t type_code[] = {
2756 [GDB_BREAKPOINT_HW] = 0x0,
2757 [GDB_WATCHPOINT_WRITE] = 0x1,
2758 [GDB_WATCHPOINT_ACCESS] = 0x3
2759 };
2760 const uint8_t len_code[] = {
2761 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
2762 };
2763 int n;
2764
a60f24b5 2765 if (kvm_sw_breakpoints_active(cpu)) {
e22a25c9 2766 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
b9bec74b 2767 }
e22a25c9
AL
2768 if (nb_hw_breakpoint > 0) {
2769 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
2770 dbg->arch.debugreg[7] = 0x0600;
2771 for (n = 0; n < nb_hw_breakpoint; n++) {
2772 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
2773 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
2774 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
95c077c9 2775 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
e22a25c9
AL
2776 }
2777 }
2778}
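As a worked example of the debugreg[7] encoding built above (illustrative only, not part of the file): a single GDB_WATCHPOINT_WRITE of length 4 in slot 0 uses type_code 0x1 and len_code 0x3, giving 0x0600 | (2 << 0) | (0x1 << 16) | (0x3 << 18) = 0x000d0602, i.e. the global-enable bit for slot 0 plus "break on data write, 4 bytes" in that slot's control field.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the DR7 computation in kvm_arch_update_guest_debug() for one
 * write watchpoint of length 4 in hardware slot 0. */
int main(void)
{
    const uint32_t type = 0x1;     /* type_code[GDB_WATCHPOINT_WRITE] */
    const uint32_t len  = 0x3;     /* len_code[4] */
    int n = 0;                     /* hardware slot */
    uint32_t dr7 = 0x0600;         /* same base value as the code above */

    dr7 |= (2 << (n * 2))          /* global-enable bit for slot n */
         | (type << (16 + n * 4))  /* R/W field: break on data writes */
         | (len  << (18 + n * 4)); /* LEN field: 4 bytes */

    printf("debugreg[7] = 0x%08x\n", dr7);   /* prints 0x000d0602 */
    return 0;
}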
4513d923 2779
2a4dac83
JK
2780static bool host_supports_vmx(void)
2781{
2782 uint32_t ecx, unused;
2783
2784 host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
2785 return ecx & CPUID_EXT_VMX;
2786}
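CPUID_EXT_VMX is bit 5 of ECX in CPUID leaf 1, so the same probe can be written against the compiler's <cpuid.h> helper instead of QEMU's host_cpuid() wrapper. A standalone sketch (an assumption for illustration, not code from this file):

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

/* Check CPUID.01H:ECX bit 5 (VMX) using GCC/Clang's __get_cpuid(). */
static bool cpu_has_vmx(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
        return false;              /* leaf 1 unsupported; effectively never */
    }
    return ecx & (1u << 5);
}

int main(void)
{
    printf("host VMX: %s\n", cpu_has_vmx() ? "yes" : "no");
    return 0;
}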
2787
2788#define VMX_INVALID_GUEST_STATE 0x80000021
2789
20d695a9 2790int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
2a4dac83 2791{
20d695a9 2792 X86CPU *cpu = X86_CPU(cs);
2a4dac83
JK
2793 uint64_t code;
2794 int ret;
2795
2796 switch (run->exit_reason) {
2797 case KVM_EXIT_HLT:
2798 DPRINTF("handle_hlt\n");
4b8523ee 2799 qemu_mutex_lock_iothread();
839b5630 2800 ret = kvm_handle_halt(cpu);
4b8523ee 2801 qemu_mutex_unlock_iothread();
2a4dac83
JK
2802 break;
2803 case KVM_EXIT_SET_TPR:
2804 ret = 0;
2805 break;
d362e757 2806 case KVM_EXIT_TPR_ACCESS:
4b8523ee 2807 qemu_mutex_lock_iothread();
f7575c96 2808 ret = kvm_handle_tpr_access(cpu);
4b8523ee 2809 qemu_mutex_unlock_iothread();
d362e757 2810 break;
2a4dac83
JK
2811 case KVM_EXIT_FAIL_ENTRY:
2812 code = run->fail_entry.hardware_entry_failure_reason;
2813 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
2814 code);
2815 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
2816 fprintf(stderr,
12619721 2817 "\nIf you're running a guest on an Intel machine without "
2a4dac83
JK
2818 "unrestricted mode\n"
 2819                    "support, the failure is most likely due to the guest "
 2820                    "entering an invalid\n"
 2821                    "state for Intel VT. For example, the guest may be running "
2822 "in big real mode\n"
2823 "which is not supported on less recent Intel processors."
2824 "\n\n");
2825 }
2826 ret = -1;
2827 break;
2828 case KVM_EXIT_EXCEPTION:
2829 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
2830 run->ex.exception, run->ex.error_code);
2831 ret = -1;
2832 break;
f2574737
JK
2833 case KVM_EXIT_DEBUG:
2834 DPRINTF("kvm_exit_debug\n");
4b8523ee 2835 qemu_mutex_lock_iothread();
a60f24b5 2836 ret = kvm_handle_debug(cpu, &run->debug.arch);
4b8523ee 2837 qemu_mutex_unlock_iothread();
f2574737 2838 break;
2a4dac83
JK
2839 default:
2840 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
2841 ret = -1;
2842 break;
2843 }
2844
2845 return ret;
2846}
2847
20d695a9 2848bool kvm_arch_stop_on_emulation_error(CPUState *cs)
4513d923 2849{
20d695a9
AF
2850 X86CPU *cpu = X86_CPU(cs);
2851 CPUX86State *env = &cpu->env;
2852
dd1750d7 2853 kvm_cpu_synchronize_state(cs);
b9bec74b
JK
2854 return !(env->cr[0] & CR0_PE_MASK) ||
2855 ((env->segs[R_CS].selector & 3) != 3);
4513d923 2856}
84b058d7
JK
2857
2858void kvm_arch_init_irq_routing(KVMState *s)
2859{
2860 if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
 2861        /* If the kernel can't do irq routing, the interrupt source
2862 * override 0->2 cannot be set up as required by HPET.
2863 * So we have to disable it.
2864 */
2865 no_hpet = 1;
2866 }
cc7e0ddf 2867 /* We know at this point that we're using the in-kernel
614e41bc 2868 * irqchip, so we can use irqfds, and on x86 we know
f3e1bed8 2869 * we can use msi via irqfd and GSI routing.
cc7e0ddf 2870 */
614e41bc 2871 kvm_msi_via_irqfd_allowed = true;
f3e1bed8 2872 kvm_gsi_routing_allowed = true;
84b058d7 2873}
b139bd30
JK
2874
2875/* Classic KVM device assignment interface. Will remain x86 only. */
2876int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
2877 uint32_t flags, uint32_t *dev_id)
2878{
2879 struct kvm_assigned_pci_dev dev_data = {
2880 .segnr = dev_addr->domain,
2881 .busnr = dev_addr->bus,
2882 .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
2883 .flags = flags,
2884 };
2885 int ret;
2886
2887 dev_data.assigned_dev_id =
2888 (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
2889
2890 ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
2891 if (ret < 0) {
2892 return ret;
2893 }
2894
2895 *dev_id = dev_data.assigned_dev_id;
2896
2897 return 0;
2898}
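The assigned_dev_id computed above packs the PCI address as domain << 16 | bus << 8 | devfn, where devfn itself is slot << 3 | function. A small worked example (illustrative values only): domain 0, bus 0x05, slot 0x10, function 2 gives devfn 0x82 and an id of 0x00000582.

#include <stdint.h>
#include <stdio.h>

#define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))

/* Worked example of the assigned_dev_id packing in kvm_device_pci_assign(). */
int main(void)
{
    uint32_t domain = 0, bus = 0x05, slot = 0x10, func = 2;
    uint32_t devfn = PCI_DEVFN(slot, func);
    uint32_t dev_id = (domain << 16) | (bus << 8) | devfn;

    printf("devfn = 0x%02x, assigned_dev_id = 0x%08x\n", devfn, dev_id);
    /* prints: devfn = 0x82, assigned_dev_id = 0x00000582 */
    return 0;
}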
2899
2900int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
2901{
2902 struct kvm_assigned_pci_dev dev_data = {
2903 .assigned_dev_id = dev_id,
2904 };
2905
2906 return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
2907}
2908
2909static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
2910 uint32_t irq_type, uint32_t guest_irq)
2911{
2912 struct kvm_assigned_irq assigned_irq = {
2913 .assigned_dev_id = dev_id,
2914 .guest_irq = guest_irq,
2915 .flags = irq_type,
2916 };
2917
2918 if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
2919 return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
2920 } else {
2921 return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
2922 }
2923}
2924
2925int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
2926 uint32_t guest_irq)
2927{
2928 uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
2929 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
2930
2931 return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
2932}
2933
2934int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
2935{
2936 struct kvm_assigned_pci_dev dev_data = {
2937 .assigned_dev_id = dev_id,
2938 .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
2939 };
2940
2941 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
2942}
2943
2944static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
2945 uint32_t type)
2946{
2947 struct kvm_assigned_irq assigned_irq = {
2948 .assigned_dev_id = dev_id,
2949 .flags = type,
2950 };
2951
2952 return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
2953}
2954
2955int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
2956{
2957 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
2958 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
2959}
2960
2961int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
2962{
2963 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
2964 KVM_DEV_IRQ_GUEST_MSI, virq);
2965}
2966
2967int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
2968{
2969 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
2970 KVM_DEV_IRQ_HOST_MSI);
2971}
2972
2973bool kvm_device_msix_supported(KVMState *s)
2974{
2975 /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
2976 * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
2977 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
2978}
2979
2980int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
2981 uint32_t nr_vectors)
2982{
2983 struct kvm_assigned_msix_nr msix_nr = {
2984 .assigned_dev_id = dev_id,
2985 .entry_nr = nr_vectors,
2986 };
2987
2988 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
2989}
2990
2991int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
2992 int virq)
2993{
2994 struct kvm_assigned_msix_entry msix_entry = {
2995 .assigned_dev_id = dev_id,
2996 .gsi = virq,
2997 .entry = vector,
2998 };
2999
3000 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
3001}
3002
3003int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
3004{
3005 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
3006 KVM_DEV_IRQ_GUEST_MSIX, 0);
3007}
3008
3009int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
3010{
3011 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
3012 KVM_DEV_IRQ_HOST_MSIX);
3013}
9e03a040
FB
3014
3015int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
dc9f06ca 3016 uint64_t address, uint32_t data, PCIDevice *dev)
9e03a040
FB
3017{
3018 return 0;
3019}
1850b6b7
EA
3020
3021int kvm_arch_msi_data_to_gsi(uint32_t data)
3022{
3023 abort();
3024}