/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

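/*
 * SIGBUS si_code values for hardware memory errors: "action required"
 * (AR) means the faulting access itself hit the bad page and must not be
 * retried, while "action optional" (AO) is an advance warning about a
 * poisoned page that has not been consumed yet. Provide fallback
 * definitions for system headers that predate them.
 */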
#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
static bool has_msr_async_pf_en;
#endif
static int lm_capable_kernel;

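/*
 * KVM_GET_SUPPORTED_CPUID fails with E2BIG when the supplied buffer has
 * fewer entries than the kernel wants to return. Returning NULL in that
 * case lets the caller retry with a larger 'max';
 * kvm_arch_get_supported_cpuid() below simply keeps doubling it.
 */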
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#ifdef KVM_CAP_ASYNC_PF
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
#endif
    { -1, -1 }
};

static int get_para_features(CPUState *env)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(env->kvm_state, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}
#endif

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
#ifdef CONFIG_KVM_PARA
    int has_kvm_features = 0;
#endif

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
#ifdef CONFIG_KVM_PARA
            if (cpuid->entries[i].function == KVM_CPUID_FEATURES) {
                has_kvm_features = 1;
            }
#endif
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

#ifdef CONFIG_KVM_PARA
    /* fallback for older kernels */
    if (!has_kvm_features && (function == KVM_CPUID_FEATURES)) {
        ret = get_para_features(env);
    }
#endif

    return ret;
}

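/*
 * Typical use of kvm_arch_get_supported_cpuid(), as in
 * kvm_arch_init_vcpu() below: intersect a guest feature word with what
 * the host kernel can actually virtualize, e.g.
 *
 *     env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
 */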
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        qemu_free(page);
    }
}

#ifdef KVM_CAP_MCE
static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = qemu_malloc(sizeof(HWPoisonPage));
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

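/*
 * Note on the constants used by kvm_mce_inject() below: 0x134 and 0xc0
 * are MCA error codes (MCACOD), presumably the SRAR "data load" and
 * SRAO "memory scrubbing" codes from the Intel SDM's recoverable-error
 * classification. The EIPV/RIPV bits declare whether the saved
 * instruction pointer is related to the error and whether execution can
 * be restarted at it.
 */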
static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
{
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}
#endif /* KVM_CAP_MCE */

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

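/*
 * SIGBUS entry points: kvm_arch_on_sigbus_vcpu() runs when a VCPU
 * thread itself took the fault, kvm_arch_on_sigbus() when the iothread
 * signal handler did. In both cases, errors that map to guest RAM are
 * recorded for un-poisoning on reset and forwarded to the guest as a
 * machine check; errors in QEMU's own memory are fatal unless they are
 * merely "action optional" advisories.
 */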
int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#ifdef KVM_CAP_MCE
    ram_addr_t ram_addr;
    target_phys_addr_t paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(env, paddr, code);
    } else
#endif /* KVM_CAP_MCE */
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
#ifdef KVM_CAP_MCE
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        target_phys_addr_t paddr;

        /* Hope we are lucky for AO MCE */
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(first_cpu, paddr, code);
    } else
#endif /* KVM_CAP_MCE */
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

static int kvm_inject_mce_oldstyle(CPUState *env)
{
#ifdef KVM_CAP_MCE
    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
    }
#endif /* KVM_CAP_MCE */
    return 0;
}

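/*
 * vm_change_state handler: a TSC value cached by kvm_get_msrs() stays
 * usable only while the VM is stopped, so invalidate it whenever the
 * guest starts running again.
 */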
static void cpu_update_state(void *opaque, int running, int reason)
{
    CPUState *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef CONFIG_KVM_PARA
    uint32_t signature[3];
#endif

    env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
                                                            0, R_EDX);

    cpuid_i = 0;

#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & kvm_arch_get_supported_cpuid(env,
                                                KVM_CPUID_FEATURES, 0, R_EAX);

#ifdef KVM_CAP_ASYNC_PF
    has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
#endif

#endif

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    break;
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        env->cpuid_ext4_features &=
            kvm_arch_get_supported_cpuid(env, 0xC0000001, 0, R_EDX);
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

#ifdef KVM_CAP_MCE
    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks > MCE_BANKS_DEF) {
            banks = MCE_BANKS_DEF;
        }
        mcg_cap &= MCE_CAP_DEF;
        mcg_cap |= banks;
        ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }

        env->mcg_cap = mcg_cap;
    }
#endif

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    return ret;
}

int kvm_arch_init(KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    int ret;
    struct utsname utsname;

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }
#endif
    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    return 0;
}

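/*
 * Segment conversion helpers: QEMU packs segment attributes into the
 * SegmentCache flags word using the DESC_* shifts and masks, while KVM
 * uses the discrete fields of struct kvm_segment. set_v8086_seg()
 * forces the fixed attributes that vm86 mode requires (type 3, DPL 3,
 * present, 16-bit).
 */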
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

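/*
 * kvm_getput_regs() below shares one code path for both directions:
 * with 'set' non-zero it copies QEMU state into struct kvm_regs for
 * KVM_SET_REGS, with zero it copies a KVM_GET_REGS result back out.
 */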
static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

#ifdef KVM_CAP_XSAVE
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#endif

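/*
 * The XSAVE_* values above are uint32_t word offsets into
 * kvm_xsave.region[] and mirror the hardware XSAVE area layout: the
 * legacy FXSAVE header (FPU instruction and operand pointers, MXCSR),
 * the x87 and SSE register space, the XSTATE_BV header field at byte
 * offset 512, and the AVX high halves in the extended region.
 */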
static int kvm_put_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    int i, r;
    struct kvm_xsave* xsave;
    uint16_t cwd, swd, twd;

    if (!kvm_has_xsave()) {
        return kvm_put_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(env->fpop << 16) + twd;
    memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
    memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    qemu_free(xsave);
    return r;
#else
    return kvm_put_fpu(env);
#endif
}

static int kvm_put_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
#else
    return 0;
#endif
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

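/*
 * kvm_put_msrs() honours the same 'level' values as
 * kvm_arch_put_registers(): plain runtime writeback covers only cheap,
 * side-effect-free MSRs, KVM_PUT_RESET_STATE adds the paravirtual
 * clock/async-PF MSRs, and KVM_PUT_FULL_STATE additionally rewrites the
 * TSC (normally only wanted after migration).
 */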
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
    }
    /*
     * The following paravirtual MSRs have side effects on the guest or are
     * too heavy for normal writeback. Limit them to reset or full state
     * updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
#endif
    }
#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }
#endif

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    struct kvm_xsave* xsave;
    int ret, i;
    uint16_t cwd, swd, twd;

    if (!kvm_has_xsave()) {
        return kvm_get_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        qemu_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    env->fpop = (uint16_t)(xsave->region[1] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
    memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    qemu_free(xsave);
    return 0;
#else
    return kvm_get_fpu(env);
#endif
}

static int kvm_get_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    }
    return 0;
#else
    return 0;
#endif
}

static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env->apic_state, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env->apic_state, sregs.cr8);

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    msrs[n++].index = MSR_PAT;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }

    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
        env->tsc_valid = !vm_running;
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }
#endif

#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }
#endif

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
#ifdef KVM_CAP_MCE
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
#endif
        default:
#ifdef KVM_CAP_MCE
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
#endif
            break;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
#endif
        }
    }

    return 0;
}

static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        env->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_put_vcpu_events(CPUState *env, int level)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
#else
    return 0;
#endif
}

static int kvm_get_vcpu_events(CPUState *env)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;
#endif

    return 0;
}

static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    return ret;
}

static int kvm_put_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
#else
    return 0;
#endif
}

static int kvm_get_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;
#endif

    return 0;
}

int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(env, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0) {
            return ret;
        }
    }
    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int ret;

    /* Inject NMI */
    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        ret = kvm_vcpu_ioctl(env, KVM_NMI);
        if (ret < 0) {
            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                    strerror(-ret));
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Force the VCPU out of its inner loop to process the INIT request */
        if (env->interrupt_request & CPU_INTERRUPT_INIT) {
            env->exit_request = 1;
        }

        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (env->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(env->apic_state);
    }
}

void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);
}

int kvm_arch_process_async_events(CPUState *env)
{
    if (env->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        env->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(env);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            env->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        env->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 0;
    }
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->halted;
}

static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
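/*
 * Software breakpoints are implemented by patching an int3 (0xcc) over
 * the first byte of the target instruction; the original byte is kept
 * in bp->saved_insn and verified on removal, so a breakpoint the guest
 * has overwritten is reported as an error instead of being silently
 * dropped.
 */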
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
{
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        /* pass to guest */
        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return ret;
}

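/*
 * kvm_arch_update_guest_debug() builds DR7 by hand, following the
 * standard x86 debug-register layout: 0x0600 sets the GE bit plus the
 * always-one bit 10, (2 << (n * 2)) is the global-enable bit for slot
 * n, and each slot's type and length fields sit at bits 16 + n * 4 and
 * 18 + n * 4 respectively.
 */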
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

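/*
 * 0x80000021 is VMX basic exit reason 33, "VM-entry failure due to
 * invalid guest state", with bit 31 set to flag the entry failure.
 */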
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be "
                    "running in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        ret = kvm_handle_debug(&run->debug.arch);
        break;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}