/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_deadline;
static bool has_msr_async_pf_en;
static bool has_msr_misc_enable;
static int lm_capable_kernel;

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
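/*
 * Fallback table for kernels that predate KVM_CPUID_FEATURES reporting in
 * KVM_GET_SUPPORTED_CPUID: each KVM capability maps to the paravirt feature
 * bit it implies.
 */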
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
    { -1, -1 }
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}


uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    int has_kvm_features = 0;

    max = 1;
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            if (cpuid->entries[i].function == KVM_CPUID_FEATURES) {
                has_kvm_features = 1;
            }
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    g_free(cpuid);

    /* fallback for older kernels */
    if (!has_kvm_features && (function == KVM_CPUID_FEATURES)) {
        ret = get_para_features(s);
    }

    return ret;
}

typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_malloc(sizeof(HWPoisonPage));
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}
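/*
 * Build and inject an MCE for a poisoned page: action-required errors
 * (BUS_MCEERR_AR) are reported with MCI_STATUS_AR and a valid EIP, while
 * action-optional ones (BUS_MCEERR_AO) get MCG_STATUS_RIPV instead.
 */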
static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
{
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    ram_addr_t ram_addr;
    target_phys_addr_t paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(env, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        target_phys_addr_t paddr;

        /* Hope we are lucky for AO MCE */
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(first_cpu, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

static int kvm_inject_mce_oldstyle(CPUState *env)
{
    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
    }
    return 0;
}
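/* VM state change handler: the TSC value cached in env is only trustworthy
 * while the VM is stopped, so invalidate it whenever the guest resumes. */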
static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUState *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } QEMU_PACKED cpuid_data;
    KVMState *s = env->kvm_state;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int r;

    env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(s, 0x8000000A,
                                                            0, R_EDX);

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features &
        kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);

    has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        env->cpuid_ext4_features &=
            kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks > MCE_BANKS_DEF) {
            banks = MCE_BANKS_DEF;
        }
        mcg_cap &= MCE_CAP_DEF;
        mcg_cap |= banks;
        ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }

        env->mcg_cap = mcg_cap;
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    r = kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_check_extension(env->kvm_state, KVM_CAP_TSC_CONTROL);
    if (r && env->tsc_khz) {
        r = kvm_vcpu_ioctl(env, KVM_SET_TSC_KHZ, env->tsc_khz);
        if (r < 0) {
            fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
            return r;
        }
    }

    if (kvm_has_xsave()) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }

    return 0;
}

void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

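/*
 * Probe the MSRs the kernel wants saved/restored. KVM_GET_MSR_INDEX_LIST is
 * issued twice: once with nmsrs = 0 to learn the count (the kernel answers
 * with E2BIG), then again with a buffer large enough for the whole list.
 */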
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}

int kvm_arch_init(KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    int ret;
    struct utsname utsname;

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    return 0;
}

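/* Segment helpers: translate between QEMU's SegmentCache and KVM's
 * struct kvm_segment. In vm86 mode every segment is forced to a plain
 * 16-bit read/write data descriptor with DPL 3. */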
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

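/* Offsets into the kvm_xsave.region array, counted in 32-bit words: the
 * legacy FXSAVE image comes first, followed by the XSAVE header (XSTATE_BV)
 * and the extended YMM-high state. */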
#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144

static int kvm_put_xsave(CPUState *env)
{
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    int i, r;

    if (!kvm_has_xsave()) {
        return kvm_put_fpu(env);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
    xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
    memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
    memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    return r;
}

static int kvm_put_xcrs(CPUState *env)
{
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

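/* Write back guest MSRs. The ioctl takes a variable-length kvm_msrs header
 * followed by the entries; the 100 slots below are enough for everything
 * that can be filled in here. */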
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
    }
    /*
     * The following paravirtual MSRs have side effects on the guest or are
     * too heavy for normal writeback. Limit them to reset or full state
     * updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
    }
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);

}


static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(CPUState *env)
{
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    int ret, i;
    uint16_t cwd, swd, twd;

    if (!kvm_has_xsave()) {
        return kvm_get_fpu(env);
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }

    cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
    swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
    twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
    env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
    memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    return 0;
}

static int kvm_get_xcrs(CPUState *env)
{
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    }
    return 0;
}

static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env->apic_state, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env->apic_state, sregs.cr8);

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

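/* Read back the MSRs selected as supported at init time; KVM_GET_MSRS
 * returns the number of entries it actually filled in, which bounds the
 * decode loop below. */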
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    msrs[n++].index = MSR_PAT;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }
    if (has_msr_tsc_deadline) {
        msrs[n++].index = MSR_IA32_TSCDEADLINE;
    }
    if (has_msr_misc_enable) {
        msrs[n++].index = MSR_IA32_MISC_ENABLE;
    }

    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }

    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        }
    }

    return 0;
}

static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        env->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_put_vcpu_events(CPUState *env, int level)
{
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
}

static int kvm_get_vcpu_events(CPUState *env)
{
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(CPUState *env)
{
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(CPUState *env)
{
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

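/*
 * Ordering matters when pushing state into the kernel: old-style MCE
 * injection has to happen before the MSRs are written, and the guest debug
 * workarounds must run last (see the inline comments below).
 */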
int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(env, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0) {
            return ret;
        }
    }
    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int ret;

    /* Inject NMI */
    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        ret = kvm_vcpu_ioctl(env, KVM_NMI);
        if (ret < 0) {
            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                    strerror(-ret));
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Force the VCPU out of its inner loop to process the INIT request */
        if (env->interrupt_request & CPU_INTERRUPT_INIT) {
            env->exit_request = 1;
        }

        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (env->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(env->apic_state);
    }
}

void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);
}

int kvm_arch_process_async_events(CPUState *env)
{
    if (env->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        env->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(env);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            env->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        env->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 0;
    }
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->halted;
}

static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

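/* x86 exposes four hardware debug address registers (DR0-DR3), so at most
 * four hardware breakpoints/watchpoints can be armed at a time. */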
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

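/*
 * Decode a KVM_EXIT_DEBUG: DR6 bit 14 (BS) signals a single-step trap,
 * DR6 bits 0-3 tell which hardware breakpoint fired, and the matching R/W
 * field in DR7 distinguishes execution, write and access watchpoints.
 * Anything not consumed by the debugger is reinjected into the guest.
 */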
static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
{
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        /* pass to guest */
        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return ret;
}

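/* type_code[] and len_code[] translate GDB breakpoint types and lengths into
 * the R/W and LEN encodings of the DR7 fields programmed below. */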
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can be most likely due to the guest "
                    "entering an invalid\n"
1853 "state for Intel VT. For example, the guest maybe running "
1854 "in big real mode\n"
1855 "which is not supported on less recent Intel processors."
1856 "\n\n");
1857 }
1858 ret = -1;
1859 break;
1860 case KVM_EXIT_EXCEPTION:
1861 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
1862 run->ex.exception, run->ex.error_code);
1863 ret = -1;
1864 break;
f2574737
JK
1865 case KVM_EXIT_DEBUG:
1866 DPRINTF("kvm_exit_debug\n");
1867 ret = kvm_handle_debug(&run->debug.arch);
1868 break;
2a4dac83
JK
1869 default:
1870 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
1871 ret = -1;
1872 break;
1873 }
1874
1875 return ret;
1876}
1877
4513d923
GN
1878bool kvm_arch_stop_on_emulation_error(CPUState *env)
1879{
b9bec74b
JK
1880 return !(env->cr[0] & CR0_PE_MASK) ||
1881 ((env->segs[R_CS].selector & 3) != 3);
4513d923 1882}