05330448
AL
1/*
2 * QEMU KVM support
3 *
4 * Copyright (C) 2006-2008 Qumranet Technologies
5 * Copyright IBM, Corp. 2008
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 *
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
12 *
13 */
14
15#include <sys/types.h>
16#include <sys/ioctl.h>
17#include <sys/mman.h>
18
19#include <linux/kvm.h>
20
21#include "qemu-common.h"
22#include "sysemu.h"
23#include "kvm.h"
24#include "cpu.h"
e22a25c9 25#include "gdbstub.h"
0e607a80 26#include "host-utils.h"
4c5b10b7 27#include "hw/pc.h"
408392b3 28#include "hw/apic.h"
35bed8ee 29#include "ioport.h"
e7701825 30#include "kvm_x86.h"
05330448 31
bb0300dc
GN
32#ifdef CONFIG_KVM_PARA
33#include <linux/kvm_para.h>
34#endif
35//
05330448
AL
36//#define DEBUG_KVM
37
38#ifdef DEBUG_KVM
8c0d577e 39#define DPRINTF(fmt, ...) \
05330448
AL
40 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
41#else
8c0d577e 42#define DPRINTF(fmt, ...) \
05330448
AL
43 do { } while (0)
44#endif
45
1a03675d
GC
46#define MSR_KVM_WALL_CLOCK 0x11
47#define MSR_KVM_SYSTEM_TIME 0x12
48
c0532a76
MT
49#ifndef BUS_MCEERR_AR
50#define BUS_MCEERR_AR 4
51#endif
52#ifndef BUS_MCEERR_AO
53#define BUS_MCEERR_AO 5
54#endif
55
b827df58
AK
56#ifdef KVM_CAP_EXT_CPUID
57
58static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
59{
60 struct kvm_cpuid2 *cpuid;
61 int r, size;
62
63 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
64 cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
65 cpuid->nent = max;
66 r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
76ae317f
MM
67 if (r == 0 && cpuid->nent >= max) {
68 r = -E2BIG;
69 }
b827df58
AK
70 if (r < 0) {
71 if (r == -E2BIG) {
72 qemu_free(cpuid);
73 return NULL;
74 } else {
75 fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
76 strerror(-r));
77 exit(1);
78 }
79 }
80 return cpuid;
81}
82
c958a8bd
SY
83uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
84 uint32_t index, int reg)
b827df58
AK
85{
86 struct kvm_cpuid2 *cpuid;
87 int i, max;
88 uint32_t ret = 0;
89 uint32_t cpuid_1_edx;
90
91 if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
92 return -1U;
93 }
94
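    /* Note (added): KVM does not report up front how many CPUID entries it
     * supports, so start with a single-entry buffer and keep doubling it
     * until KVM_GET_SUPPORTED_CPUID stops failing with E2BIG
     * (try_get_cpuid() returns NULL in that case). */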
95 max = 1;
96 while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
97 max *= 2;
98 }
99
100 for (i = 0; i < cpuid->nent; ++i) {
c958a8bd
SY
101 if (cpuid->entries[i].function == function &&
102 cpuid->entries[i].index == index) {
b827df58
AK
103 switch (reg) {
104 case R_EAX:
105 ret = cpuid->entries[i].eax;
106 break;
107 case R_EBX:
108 ret = cpuid->entries[i].ebx;
109 break;
110 case R_ECX:
111 ret = cpuid->entries[i].ecx;
112 break;
113 case R_EDX:
114 ret = cpuid->entries[i].edx;
19ccb8ea
JK
115 switch (function) {
116 case 1:
117 /* KVM before 2.6.30 misreports the following features */
118 ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
119 break;
120 case 0x80000001:
b827df58
AK
121 /* On Intel, kvm returns cpuid according to the Intel spec,
122 * so add missing bits according to the AMD spec:
123 */
c958a8bd 124 cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
c1667e40 125 ret |= cpuid_1_edx & 0x183f7ff;
19ccb8ea 126 break;
b827df58
AK
127 }
128 break;
129 }
130 }
131 }
132
133 qemu_free(cpuid);
134
135 return ret;
136}
137
138#else
139
c958a8bd
SY
140uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
141 uint32_t index, int reg)
b827df58
AK
142{
143 return -1U;
144}
145
146#endif
147
bb0300dc
GN
148#ifdef CONFIG_KVM_PARA
149struct kvm_para_features {
150 int cap;
151 int feature;
152} para_features[] = {
153#ifdef KVM_CAP_CLOCKSOURCE
154 { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
155#endif
156#ifdef KVM_CAP_NOP_IO_DELAY
157 { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
158#endif
159#ifdef KVM_CAP_PV_MMU
160 { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
bb0300dc
GN
161#endif
162 { -1, -1 }
163};
164
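/* Note (added): get_para_features() probes each capability listed above and
 * builds the feature mask that kvm_arch_init_vcpu() advertises to the guest
 * in the KVM_CPUID_FEATURES paravirt CPUID leaf. */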
165static int get_para_features(CPUState *env)
166{
167 int i, features = 0;
168
169 for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
170 if (kvm_check_extension(env->kvm_state, para_features[i].cap))
171 features |= (1 << para_features[i].feature);
172 }
173
174 return features;
175}
176#endif
177
e7701825
MT
178#ifdef KVM_CAP_MCE
179static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
180 int *max_banks)
181{
182 int r;
183
184 r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_MCE);
185 if (r > 0) {
186 *max_banks = r;
187 return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
188 }
189 return -ENOSYS;
190}
191
192static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
193{
194 return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
195}
196
197static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
198{
199 return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
200}
201
c0532a76
MT
202static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
203{
204 struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
205 int r;
206
207 kmsrs->nmsrs = n;
208 memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
209 r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
210 memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
211 free(kmsrs);
212 return r;
213}
214
215/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
216static int kvm_mce_in_exception(CPUState *env)
217{
218 struct kvm_msr_entry msr_mcg_status = {
219 .index = MSR_MCG_STATUS,
220 };
221 int r;
222
223 r = kvm_get_msr(env, &msr_mcg_status, 1);
224 if (r == -1 || r == 0) {
225 return -1;
226 }
227 return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
228}
229
e7701825
MT
230struct kvm_x86_mce_data
231{
232 CPUState *env;
233 struct kvm_x86_mce *mce;
c0532a76 234 int abort_on_error;
e7701825
MT
235};
236
237static void kvm_do_inject_x86_mce(void *_data)
238{
239 struct kvm_x86_mce_data *data = _data;
240 int r;
241
f8502cfb
HS
242 /* If there is an MCE exception being processed, ignore this SRAO MCE */
243 if ((data->env->mcg_cap & MCG_SER_P) &&
244 !(data->mce->status & MCI_STATUS_AR)) {
245 r = kvm_mce_in_exception(data->env);
246 if (r == -1) {
247 fprintf(stderr, "Failed to get MCE status\n");
248 } else if (r) {
249 return;
250 }
251 }
c0532a76 252
e7701825 253 r = kvm_set_mce(data->env, data->mce);
c0532a76 254 if (r < 0) {
e7701825 255 perror("kvm_set_mce FAILED");
c0532a76
MT
256 if (data->abort_on_error) {
257 abort();
258 }
259 }
e7701825
MT
260}
261#endif
262
263void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
c0532a76
MT
264 uint64_t mcg_status, uint64_t addr, uint64_t misc,
265 int abort_on_error)
e7701825
MT
266{
267#ifdef KVM_CAP_MCE
268 struct kvm_x86_mce mce = {
269 .bank = bank,
270 .status = status,
271 .mcg_status = mcg_status,
272 .addr = addr,
273 .misc = misc,
274 };
275 struct kvm_x86_mce_data data = {
276 .env = cenv,
277 .mce = &mce,
278 };
279
c0532a76
MT
280 if (!cenv->mcg_cap) {
281 fprintf(stderr, "MCE support is not enabled!\n");
282 return;
283 }
284
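    /* Note (added): run_on_cpu() makes sure the KVM_X86_SET_MCE ioctl issued
     * from kvm_do_inject_x86_mce() runs on the target vCPU's own thread, as
     * required for KVM vcpu ioctls. */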
e7701825 285 run_on_cpu(cenv, kvm_do_inject_x86_mce, &data);
c0532a76
MT
286#else
287 if (abort_on_error)
288 abort();
e7701825
MT
289#endif
290}
291
05330448
AL
292int kvm_arch_init_vcpu(CPUState *env)
293{
294 struct {
486bd5a2
AL
295 struct kvm_cpuid2 cpuid;
296 struct kvm_cpuid_entry2 entries[100];
05330448 297 } __attribute__((packed)) cpuid_data;
486bd5a2 298 uint32_t limit, i, j, cpuid_i;
a33609ca 299 uint32_t unused;
bb0300dc
GN
300 struct kvm_cpuid_entry2 *c;
301#ifdef KVM_CPUID_SIGNATURE
302 uint32_t signature[3];
303#endif
05330448 304
f8d926e9
JK
305 env->mp_state = KVM_MP_STATE_RUNNABLE;
306
c958a8bd 307 env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
6c0d7ee8
AP
308
309 i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
c958a8bd 310 env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
6c0d7ee8
AP
311 env->cpuid_ext_features |= i;
312
457dfed6 313 env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
c958a8bd 314 0, R_EDX);
457dfed6 315 env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
c958a8bd 316 0, R_ECX);
296acb64
JR
317 env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
318 0, R_EDX);
319
6c1f42fe 320
05330448
AL
321 cpuid_i = 0;
322
bb0300dc
GN
323#ifdef CONFIG_KVM_PARA
324 /* Paravirtualization CPUIDs */
325 memcpy(signature, "KVMKVMKVM\0\0\0", 12);
326 c = &cpuid_data.entries[cpuid_i++];
327 memset(c, 0, sizeof(*c));
328 c->function = KVM_CPUID_SIGNATURE;
329 c->eax = 0;
330 c->ebx = signature[0];
331 c->ecx = signature[1];
332 c->edx = signature[2];
333
334 c = &cpuid_data.entries[cpuid_i++];
335 memset(c, 0, sizeof(*c));
336 c->function = KVM_CPUID_FEATURES;
337 c->eax = env->cpuid_kvm_features & get_para_features(env);
338#endif
339
a33609ca 340 cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
05330448
AL
341
342 for (i = 0; i <= limit; i++) {
bb0300dc 343 c = &cpuid_data.entries[cpuid_i++];
486bd5a2
AL
344
345 switch (i) {
a36b1029
AL
346 case 2: {
347 /* Keep reading function 2 until all the input is received */
348 int times;
349
a36b1029 350 c->function = i;
a33609ca
AL
351 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
352 KVM_CPUID_FLAG_STATE_READ_NEXT;
353 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
354 times = c->eax & 0xff;
a36b1029
AL
355
356 for (j = 1; j < times; ++j) {
a33609ca 357 c = &cpuid_data.entries[cpuid_i++];
a36b1029 358 c->function = i;
a33609ca
AL
359 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
360 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
a36b1029
AL
361 }
362 break;
363 }
486bd5a2
AL
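        /* Note (added): leaves 4, 0xb and 0xd take a sub-leaf index in ECX;
         * enumerate sub-leaves until the leaf reports the end (EAX == 0 for
         * 4 and 0xd, an empty level type in ECX bits 15..8 for 0xb). */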
364 case 4:
365 case 0xb:
366 case 0xd:
367 for (j = 0; ; j++) {
486bd5a2
AL
368 c->function = i;
369 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
370 c->index = j;
a33609ca 371 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
486bd5a2 372
a33609ca 373 if (i == 4 && c->eax == 0)
486bd5a2 374 break;
a33609ca 375 if (i == 0xb && !(c->ecx & 0xff00))
486bd5a2 376 break;
a33609ca 377 if (i == 0xd && c->eax == 0)
486bd5a2 378 break;
a33609ca
AL
379
380 c = &cpuid_data.entries[cpuid_i++];
486bd5a2
AL
381 }
382 break;
383 default:
486bd5a2 384 c->function = i;
a33609ca
AL
385 c->flags = 0;
386 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
486bd5a2
AL
387 break;
388 }
05330448 389 }
a33609ca 390 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
05330448
AL
391
392 for (i = 0x80000000; i <= limit; i++) {
bb0300dc 393 c = &cpuid_data.entries[cpuid_i++];
05330448 394
05330448 395 c->function = i;
a33609ca
AL
396 c->flags = 0;
397 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
05330448
AL
398 }
399
400 cpuid_data.cpuid.nent = cpuid_i;
401
e7701825
MT
402#ifdef KVM_CAP_MCE
403 if (((env->cpuid_version >> 8)&0xF) >= 6
404 && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
405 && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
406 uint64_t mcg_cap;
407 int banks;
408
409 if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks))
410 perror("kvm_get_mce_cap_supported FAILED");
411 else {
412 if (banks > MCE_BANKS_DEF)
413 banks = MCE_BANKS_DEF;
414 mcg_cap &= MCE_CAP_DEF;
415 mcg_cap |= banks;
416 if (kvm_setup_mce(env, &mcg_cap))
417 perror("kvm_setup_mce FAILED");
418 else
419 env->mcg_cap = mcg_cap;
420 }
421 }
422#endif
423
486bd5a2 424 return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
05330448
AL
425}
426
caa5af0f
JK
427void kvm_arch_reset_vcpu(CPUState *env)
428{
e73223a5 429 env->exception_injected = -1;
0e607a80 430 env->interrupt_injected = -1;
a0fb002c
JK
431 env->nmi_injected = 0;
432 env->nmi_pending = 0;
ddced198
MT
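    /* Note (added): with an in-kernel irqchip only the BSP starts out
     * runnable and the APs wait for INIT/SIPI in the kernel; without it,
     * SIPI delivery is handled by QEMU itself, so every vCPU is simply
     * marked runnable here. */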
433 if (kvm_irqchip_in_kernel()) {
434 env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
435 KVM_MP_STATE_UNINITIALIZED;
436 } else {
437 env->mp_state = KVM_MP_STATE_RUNNABLE;
438 }
caa5af0f
JK
439}
440
05330448
AL
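/* Note (added): probe (and cache) whether the kernel knows about MSR_STAR.
 * KVM_GET_MSR_INDEX_LIST is issued twice: the first call with nmsrs = 0 only
 * reports the required buffer size (failing with E2BIG), the second call
 * fetches the real index list, which is then scanned for MSR_STAR. */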
441static int kvm_has_msr_star(CPUState *env)
442{
443 static int has_msr_star;
444 int ret;
445
446 /* first time */
447 if (has_msr_star == 0) {
448 struct kvm_msr_list msr_list, *kvm_msr_list;
449
450 has_msr_star = -1;
451
452 /* Obtain MSR list from KVM. These are the MSRs that we must
453 * save/restore */
4c9f7372 454 msr_list.nmsrs = 0;
05330448 455 ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
6fb6d245 456 if (ret < 0 && ret != -E2BIG) {
05330448 457 return 0;
6fb6d245 458 }
d9db889f
JK
459 /* Old kernel modules had a bug and could write beyond the provided
460 * memory. Allocate at least a safe amount of 1K. */
461 kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
462 msr_list.nmsrs *
463 sizeof(msr_list.indices[0])));
05330448 464
55308450 465 kvm_msr_list->nmsrs = msr_list.nmsrs;
05330448
AL
466 ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
467 if (ret >= 0) {
468 int i;
469
470 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
471 if (kvm_msr_list->indices[i] == MSR_STAR) {
472 has_msr_star = 1;
473 break;
474 }
475 }
476 }
477
478 free(kvm_msr_list);
479 }
480
481 if (has_msr_star == 1)
482 return 1;
483 return 0;
484}
485
20420430
SY
486static int kvm_init_identity_map_page(KVMState *s)
487{
488#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
489 int ret;
490 uint64_t addr = 0xfffbc000;
491
492 if (!kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
493 return 0;
494 }
495
496 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &addr);
497 if (ret < 0) {
498 fprintf(stderr, "kvm_set_identity_map_addr: %s\n", strerror(ret));
499 return ret;
500 }
501#endif
502 return 0;
503}
504
05330448
AL
505int kvm_arch_init(KVMState *s, int smp_cpus)
506{
507 int ret;
508
509 /* create vm86 tss. KVM uses vm86 mode to emulate 16-bit code
510 * directly. In order to use vm86 mode, a TSS is needed. Since this
511 * must be part of guest physical memory, we need to allocate it. Older
512 * versions of KVM just assumed that it would be at the end of physical
513 * memory but that doesn't work with more than 4GB of memory. We simply
514 * refuse to work with those older versions of KVM. */
984b5181 515 ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
05330448
AL
516 if (ret <= 0) {
517 fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
518 return ret;
519 }
520
521 /* This address is 3 pages before the BIOS, and the BIOS should present
522 * it as unavailable memory. FIXME: need to ensure the e820 map deals
523 * with this?
524 */
4c5b10b7
JS
525 /*
526 * Tell fw_cfg to notify the BIOS to reserve the range.
527 */
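    /* Note (added): the 16K reserved at 0xfffbc000 covers the identity map
     * page set up in kvm_init_identity_map_page() plus the three-page vm86
     * TSS at 0xfffbd000 programmed just below. */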
528 if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) {
529 perror("e820_add_entry() table is full");
530 exit(1);
531 }
20420430
SY
532 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
533 if (ret < 0) {
534 return ret;
535 }
536
537 return kvm_init_identity_map_page(s);
05330448
AL
538}
539
540static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
541{
542 lhs->selector = rhs->selector;
543 lhs->base = rhs->base;
544 lhs->limit = rhs->limit;
545 lhs->type = 3;
546 lhs->present = 1;
547 lhs->dpl = 3;
548 lhs->db = 0;
549 lhs->s = 1;
550 lhs->l = 0;
551 lhs->g = 0;
552 lhs->avl = 0;
553 lhs->unusable = 0;
554}
555
556static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
557{
558 unsigned flags = rhs->flags;
559 lhs->selector = rhs->selector;
560 lhs->base = rhs->base;
561 lhs->limit = rhs->limit;
562 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
563 lhs->present = (flags & DESC_P_MASK) != 0;
564 lhs->dpl = rhs->selector & 3;
565 lhs->db = (flags >> DESC_B_SHIFT) & 1;
566 lhs->s = (flags & DESC_S_MASK) != 0;
567 lhs->l = (flags >> DESC_L_SHIFT) & 1;
568 lhs->g = (flags & DESC_G_MASK) != 0;
569 lhs->avl = (flags & DESC_AVL_MASK) != 0;
570 lhs->unusable = 0;
571}
572
573static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
574{
575 lhs->selector = rhs->selector;
576 lhs->base = rhs->base;
577 lhs->limit = rhs->limit;
578 lhs->flags =
579 (rhs->type << DESC_TYPE_SHIFT)
580 | (rhs->present * DESC_P_MASK)
581 | (rhs->dpl << DESC_DPL_SHIFT)
582 | (rhs->db << DESC_B_SHIFT)
583 | (rhs->s * DESC_S_MASK)
584 | (rhs->l << DESC_L_SHIFT)
585 | (rhs->g * DESC_G_MASK)
586 | (rhs->avl * DESC_AVL_MASK);
587}
588
589static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
590{
591 if (set)
592 *kvm_reg = *qemu_reg;
593 else
594 *qemu_reg = *kvm_reg;
595}
596
597static int kvm_getput_regs(CPUState *env, int set)
598{
599 struct kvm_regs regs;
600 int ret = 0;
601
602 if (!set) {
603 ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
604 if (ret < 0)
605 return ret;
606 }
607
608 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
609 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
610 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
611 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
612 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
613 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
614 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
615 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
616#ifdef TARGET_X86_64
617 kvm_getput_reg(&regs.r8, &env->regs[8], set);
618 kvm_getput_reg(&regs.r9, &env->regs[9], set);
619 kvm_getput_reg(&regs.r10, &env->regs[10], set);
620 kvm_getput_reg(&regs.r11, &env->regs[11], set);
621 kvm_getput_reg(&regs.r12, &env->regs[12], set);
622 kvm_getput_reg(&regs.r13, &env->regs[13], set);
623 kvm_getput_reg(&regs.r14, &env->regs[14], set);
624 kvm_getput_reg(&regs.r15, &env->regs[15], set);
625#endif
626
627 kvm_getput_reg(&regs.rflags, &env->eflags, set);
628 kvm_getput_reg(&regs.rip, &env->eip, set);
629
630 if (set)
631 ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
632
633 return ret;
634}
635
636static int kvm_put_fpu(CPUState *env)
637{
638 struct kvm_fpu fpu;
639 int i;
640
641 memset(&fpu, 0, sizeof fpu);
642 fpu.fsw = env->fpus & ~(7 << 11);
643 fpu.fsw |= (env->fpstt & 7) << 11;
644 fpu.fcw = env->fpuc;
645 for (i = 0; i < 8; ++i)
646 fpu.ftwx |= (!env->fptags[i]) << i;
647 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
648 memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
649 fpu.mxcsr = env->mxcsr;
650
651 return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
652}
653
f1665b21
SY
654#ifdef KVM_CAP_XSAVE
655#define XSAVE_CWD_RIP 2
656#define XSAVE_CWD_RDP 4
657#define XSAVE_MXCSR 6
658#define XSAVE_ST_SPACE 8
659#define XSAVE_XMM_SPACE 40
660#define XSAVE_XSTATE_BV 128
661#define XSAVE_YMMH_SPACE 144
662#endif
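/* Note (added): the XSAVE_* constants above are offsets in 32-bit words into
 * the 4 KiB kvm_xsave.region[] blob; e.g. XSAVE_XSTATE_BV == 128 corresponds
 * to byte offset 512, the start of the XSAVE header. */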
663
664static int kvm_put_xsave(CPUState *env)
665{
666#ifdef KVM_CAP_XSAVE
0f53994f 667 int i, r;
f1665b21
SY
668 struct kvm_xsave* xsave;
669 uint16_t cwd, swd, twd, fop;
670
671 if (!kvm_has_xsave())
672 return kvm_put_fpu(env);
673
674 xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
675 memset(xsave, 0, sizeof(struct kvm_xsave));
676 cwd = swd = twd = fop = 0;
677 swd = env->fpus & ~(7 << 11);
678 swd |= (env->fpstt & 7) << 11;
679 cwd = env->fpuc;
680 for (i = 0; i < 8; ++i)
681 twd |= (!env->fptags[i]) << i;
682 xsave->region[0] = (uint32_t)(swd << 16) + cwd;
683 xsave->region[1] = (uint32_t)(fop << 16) + twd;
684 memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
685 sizeof env->fpregs);
686 memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
687 sizeof env->xmm_regs);
688 xsave->region[XSAVE_MXCSR] = env->mxcsr;
689 *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
690 memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
691 sizeof env->ymmh_regs);
0f53994f
MT
692 r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
693 qemu_free(xsave);
694 return r;
f1665b21
SY
695#else
696 return kvm_put_fpu(env);
697#endif
698}
699
700static int kvm_put_xcrs(CPUState *env)
701{
702#ifdef KVM_CAP_XCRS
703 struct kvm_xcrs xcrs;
704
705 if (!kvm_has_xcrs())
706 return 0;
707
708 xcrs.nr_xcrs = 1;
709 xcrs.flags = 0;
710 xcrs.xcrs[0].xcr = 0;
711 xcrs.xcrs[0].value = env->xcr0;
712 return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
713#else
714 return 0;
715#endif
716}
717
05330448
AL
718static int kvm_put_sregs(CPUState *env)
719{
720 struct kvm_sregs sregs;
721
0e607a80
JK
722 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
723 if (env->interrupt_injected >= 0) {
724 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
725 (uint64_t)1 << (env->interrupt_injected % 64);
726 }
05330448
AL
727
728 if ((env->eflags & VM_MASK)) {
729 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
730 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
731 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
732 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
733 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
734 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
735 } else {
736 set_seg(&sregs.cs, &env->segs[R_CS]);
737 set_seg(&sregs.ds, &env->segs[R_DS]);
738 set_seg(&sregs.es, &env->segs[R_ES]);
739 set_seg(&sregs.fs, &env->segs[R_FS]);
740 set_seg(&sregs.gs, &env->segs[R_GS]);
741 set_seg(&sregs.ss, &env->segs[R_SS]);
742
743 if (env->cr[0] & CR0_PE_MASK) {
744 /* force ss cpl to cs cpl */
745 sregs.ss.selector = (sregs.ss.selector & ~3) |
746 (sregs.cs.selector & 3);
747 sregs.ss.dpl = sregs.ss.selector & 3;
748 }
749 }
750
751 set_seg(&sregs.tr, &env->tr);
752 set_seg(&sregs.ldt, &env->ldt);
753
754 sregs.idt.limit = env->idt.limit;
755 sregs.idt.base = env->idt.base;
756 sregs.gdt.limit = env->gdt.limit;
757 sregs.gdt.base = env->gdt.base;
758
759 sregs.cr0 = env->cr[0];
760 sregs.cr2 = env->cr[2];
761 sregs.cr3 = env->cr[3];
762 sregs.cr4 = env->cr[4];
763
4a942cea
BS
764 sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
765 sregs.apic_base = cpu_get_apic_base(env->apic_state);
05330448
AL
766
767 sregs.efer = env->efer;
768
769 return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
770}
771
772static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
773 uint32_t index, uint64_t value)
774{
775 entry->index = index;
776 entry->data = value;
777}
778
ea643051 779static int kvm_put_msrs(CPUState *env, int level)
05330448
AL
780{
781 struct {
782 struct kvm_msrs info;
783 struct kvm_msr_entry entries[100];
784 } msr_data;
785 struct kvm_msr_entry *msrs = msr_data.entries;
d8da8574 786 int n = 0;
05330448
AL
787
788 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
789 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
790 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
791 if (kvm_has_msr_star(env))
792 kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
aa851e36 793 kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
05330448
AL
794#ifdef TARGET_X86_64
795 /* FIXME if lm capable */
796 kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
797 kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
798 kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
799 kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
800#endif
ea643051
JK
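    /* Note (added): the TSC and kvmclock MSRs change under the guest's feet,
     * so they are presumably only rewritten on a full state load
     * (init/migration) and left alone during routine register syncs. */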
801 if (level == KVM_PUT_FULL_STATE) {
802 kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
803 kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
804 env->system_time_msr);
805 kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
806 }
57780495
MT
807#ifdef KVM_CAP_MCE
808 if (env->mcg_cap) {
d8da8574 809 int i;
57780495
MT
810 if (level == KVM_PUT_RESET_STATE)
811 kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
812 else if (level == KVM_PUT_FULL_STATE) {
813 kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
814 kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
815 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
816 kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
817 }
818 }
819#endif
1a03675d 820
05330448
AL
821 msr_data.info.nmsrs = n;
822
823 return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
824
825}
826
827
828static int kvm_get_fpu(CPUState *env)
829{
830 struct kvm_fpu fpu;
831 int i, ret;
832
833 ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
834 if (ret < 0)
835 return ret;
836
837 env->fpstt = (fpu.fsw >> 11) & 7;
838 env->fpus = fpu.fsw;
839 env->fpuc = fpu.fcw;
840 for (i = 0; i < 8; ++i)
841 env->fptags[i] = !((fpu.ftwx >> i) & 1);
842 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
843 memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
844 env->mxcsr = fpu.mxcsr;
845
846 return 0;
847}
848
f1665b21
SY
849static int kvm_get_xsave(CPUState *env)
850{
851#ifdef KVM_CAP_XSAVE
852 struct kvm_xsave* xsave;
853 int ret, i;
854 uint16_t cwd, swd, twd, fop;
855
856 if (!kvm_has_xsave())
857 return kvm_get_fpu(env);
858
859 xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
860 ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
0f53994f
MT
861 if (ret < 0) {
862 qemu_free(xsave);
f1665b21 863 return ret;
0f53994f 864 }
f1665b21
SY
865
866 cwd = (uint16_t)xsave->region[0];
867 swd = (uint16_t)(xsave->region[0] >> 16);
868 twd = (uint16_t)xsave->region[1];
869 fop = (uint16_t)(xsave->region[1] >> 16);
870 env->fpstt = (swd >> 11) & 7;
871 env->fpus = swd;
872 env->fpuc = cwd;
873 for (i = 0; i < 8; ++i)
874 env->fptags[i] = !((twd >> i) & 1);
875 env->mxcsr = xsave->region[XSAVE_MXCSR];
876 memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
877 sizeof env->fpregs);
878 memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
879 sizeof env->xmm_regs);
880 env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
881 memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
882 sizeof env->ymmh_regs);
0f53994f 883 qemu_free(xsave);
f1665b21
SY
884 return 0;
885#else
886 return kvm_get_fpu(env);
887#endif
888}
889
890static int kvm_get_xcrs(CPUState *env)
891{
892#ifdef KVM_CAP_XCRS
893 int i, ret;
894 struct kvm_xcrs xcrs;
895
896 if (!kvm_has_xcrs())
897 return 0;
898
899 ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
900 if (ret < 0)
901 return ret;
902
903 for (i = 0; i < xcrs.nr_xcrs; i++)
904 /* Only support xcr0 now */
905 if (xcrs.xcrs[0].xcr == 0) {
906 env->xcr0 = xcrs.xcrs[0].value;
907 break;
908 }
909 return 0;
910#else
911 return 0;
912#endif
913}
914
05330448
AL
915static int kvm_get_sregs(CPUState *env)
916{
917 struct kvm_sregs sregs;
918 uint32_t hflags;
0e607a80 919 int bit, i, ret;
05330448
AL
920
921 ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
922 if (ret < 0)
923 return ret;
924
0e607a80
JK
925 /* There can only be one pending IRQ set in the bitmap at a time, so try
926 * to find it and save its number instead (-1 for none). */
927 env->interrupt_injected = -1;
928 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
929 if (sregs.interrupt_bitmap[i]) {
930 bit = ctz64(sregs.interrupt_bitmap[i]);
931 env->interrupt_injected = i * 64 + bit;
932 break;
933 }
934 }
05330448
AL
935
936 get_seg(&env->segs[R_CS], &sregs.cs);
937 get_seg(&env->segs[R_DS], &sregs.ds);
938 get_seg(&env->segs[R_ES], &sregs.es);
939 get_seg(&env->segs[R_FS], &sregs.fs);
940 get_seg(&env->segs[R_GS], &sregs.gs);
941 get_seg(&env->segs[R_SS], &sregs.ss);
942
943 get_seg(&env->tr, &sregs.tr);
944 get_seg(&env->ldt, &sregs.ldt);
945
946 env->idt.limit = sregs.idt.limit;
947 env->idt.base = sregs.idt.base;
948 env->gdt.limit = sregs.gdt.limit;
949 env->gdt.base = sregs.gdt.base;
950
951 env->cr[0] = sregs.cr0;
952 env->cr[2] = sregs.cr2;
953 env->cr[3] = sregs.cr3;
954 env->cr[4] = sregs.cr4;
955
4a942cea 956 cpu_set_apic_base(env->apic_state, sregs.apic_base);
05330448
AL
957
958 env->efer = sregs.efer;
4a942cea 959 //cpu_set_apic_tpr(env->apic_state, sregs.cr8);
05330448
AL
960
961#define HFLAG_COPY_MASK ~( \
962 HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
963 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
964 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
965 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
966
967
968
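    /* Note (added): recompute the hflags bits named in the HFLAG_COPY_MASK
     * definition above from the control registers, EFER and segment
     * descriptors just fetched; all other hflags bits keep their previous
     * value. */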
969 hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
970 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
971 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
972 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
973 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
974 hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
975 (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
976
977 if (env->efer & MSR_EFER_LMA) {
978 hflags |= HF_LMA_MASK;
979 }
980
981 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
982 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
983 } else {
984 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
985 (DESC_B_SHIFT - HF_CS32_SHIFT);
986 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
987 (DESC_B_SHIFT - HF_SS32_SHIFT);
988 if (!(env->cr[0] & CR0_PE_MASK) ||
989 (env->eflags & VM_MASK) ||
990 !(hflags & HF_CS32_MASK)) {
991 hflags |= HF_ADDSEG_MASK;
992 } else {
993 hflags |= ((env->segs[R_DS].base |
994 env->segs[R_ES].base |
995 env->segs[R_SS].base) != 0) <<
996 HF_ADDSEG_SHIFT;
997 }
998 }
999 env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
05330448
AL
1000
1001 return 0;
1002}
1003
1004static int kvm_get_msrs(CPUState *env)
1005{
1006 struct {
1007 struct kvm_msrs info;
1008 struct kvm_msr_entry entries[100];
1009 } msr_data;
1010 struct kvm_msr_entry *msrs = msr_data.entries;
1011 int ret, i, n;
1012
1013 n = 0;
1014 msrs[n++].index = MSR_IA32_SYSENTER_CS;
1015 msrs[n++].index = MSR_IA32_SYSENTER_ESP;
1016 msrs[n++].index = MSR_IA32_SYSENTER_EIP;
1017 if (kvm_has_msr_star(env))
1018 msrs[n++].index = MSR_STAR;
aa851e36 1019 msrs[n++].index = MSR_VM_HSAVE_PA;
05330448
AL
1020 msrs[n++].index = MSR_IA32_TSC;
1021#ifdef TARGET_X86_64
1022 /* FIXME lm_capable_kernel */
1023 msrs[n++].index = MSR_CSTAR;
1024 msrs[n++].index = MSR_KERNELGSBASE;
1025 msrs[n++].index = MSR_FMASK;
1026 msrs[n++].index = MSR_LSTAR;
1027#endif
1a03675d
GC
1028 msrs[n++].index = MSR_KVM_SYSTEM_TIME;
1029 msrs[n++].index = MSR_KVM_WALL_CLOCK;
1030
57780495
MT
1031#ifdef KVM_CAP_MCE
1032 if (env->mcg_cap) {
1033 msrs[n++].index = MSR_MCG_STATUS;
1034 msrs[n++].index = MSR_MCG_CTL;
1035 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
1036 msrs[n++].index = MSR_MC0_CTL + i;
1037 }
1038#endif
1039
05330448
AL
1040 msr_data.info.nmsrs = n;
1041 ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
1042 if (ret < 0)
1043 return ret;
1044
1045 for (i = 0; i < ret; i++) {
1046 switch (msrs[i].index) {
1047 case MSR_IA32_SYSENTER_CS:
1048 env->sysenter_cs = msrs[i].data;
1049 break;
1050 case MSR_IA32_SYSENTER_ESP:
1051 env->sysenter_esp = msrs[i].data;
1052 break;
1053 case MSR_IA32_SYSENTER_EIP:
1054 env->sysenter_eip = msrs[i].data;
1055 break;
1056 case MSR_STAR:
1057 env->star = msrs[i].data;
1058 break;
1059#ifdef TARGET_X86_64
1060 case MSR_CSTAR:
1061 env->cstar = msrs[i].data;
1062 break;
1063 case MSR_KERNELGSBASE:
1064 env->kernelgsbase = msrs[i].data;
1065 break;
1066 case MSR_FMASK:
1067 env->fmask = msrs[i].data;
1068 break;
1069 case MSR_LSTAR:
1070 env->lstar = msrs[i].data;
1071 break;
1072#endif
1073 case MSR_IA32_TSC:
1074 env->tsc = msrs[i].data;
1075 break;
aa851e36
MT
1076 case MSR_VM_HSAVE_PA:
1077 env->vm_hsave = msrs[i].data;
1078 break;
1a03675d
GC
1079 case MSR_KVM_SYSTEM_TIME:
1080 env->system_time_msr = msrs[i].data;
1081 break;
1082 case MSR_KVM_WALL_CLOCK:
1083 env->wall_clock_msr = msrs[i].data;
1084 break;
57780495
MT
1085#ifdef KVM_CAP_MCE
1086 case MSR_MCG_STATUS:
1087 env->mcg_status = msrs[i].data;
1088 break;
1089 case MSR_MCG_CTL:
1090 env->mcg_ctl = msrs[i].data;
1091 break;
1092#endif
1093 default:
1094#ifdef KVM_CAP_MCE
1095 if (msrs[i].index >= MSR_MC0_CTL &&
1096 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
1097 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
57780495
MT
1098 }
1099#endif
d8da8574 1100 break;
05330448
AL
1101 }
1102 }
1103
1104 return 0;
1105}
1106
9bdbe550
HB
1107static int kvm_put_mp_state(CPUState *env)
1108{
1109 struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
1110
1111 return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
1112}
1113
1114static int kvm_get_mp_state(CPUState *env)
1115{
1116 struct kvm_mp_state mp_state;
1117 int ret;
1118
1119 ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
1120 if (ret < 0) {
1121 return ret;
1122 }
1123 env->mp_state = mp_state.mp_state;
1124 return 0;
1125}
1126
ea643051 1127static int kvm_put_vcpu_events(CPUState *env, int level)
a0fb002c
JK
1128{
1129#ifdef KVM_CAP_VCPU_EVENTS
1130 struct kvm_vcpu_events events;
1131
1132 if (!kvm_has_vcpu_events()) {
1133 return 0;
1134 }
1135
31827373
JK
1136 events.exception.injected = (env->exception_injected >= 0);
1137 events.exception.nr = env->exception_injected;
a0fb002c
JK
1138 events.exception.has_error_code = env->has_error_code;
1139 events.exception.error_code = env->error_code;
1140
1141 events.interrupt.injected = (env->interrupt_injected >= 0);
1142 events.interrupt.nr = env->interrupt_injected;
1143 events.interrupt.soft = env->soft_interrupt;
1144
1145 events.nmi.injected = env->nmi_injected;
1146 events.nmi.pending = env->nmi_pending;
1147 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
1148
1149 events.sipi_vector = env->sipi_vector;
1150
ea643051
JK
1151 events.flags = 0;
1152 if (level >= KVM_PUT_RESET_STATE) {
1153 events.flags |=
1154 KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
1155 }
aee028b9 1156
a0fb002c
JK
1157 return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
1158#else
1159 return 0;
1160#endif
1161}
1162
1163static int kvm_get_vcpu_events(CPUState *env)
1164{
1165#ifdef KVM_CAP_VCPU_EVENTS
1166 struct kvm_vcpu_events events;
1167 int ret;
1168
1169 if (!kvm_has_vcpu_events()) {
1170 return 0;
1171 }
1172
1173 ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
1174 if (ret < 0) {
1175 return ret;
1176 }
31827373 1177 env->exception_injected =
a0fb002c
JK
1178 events.exception.injected ? events.exception.nr : -1;
1179 env->has_error_code = events.exception.has_error_code;
1180 env->error_code = events.exception.error_code;
1181
1182 env->interrupt_injected =
1183 events.interrupt.injected ? events.interrupt.nr : -1;
1184 env->soft_interrupt = events.interrupt.soft;
1185
1186 env->nmi_injected = events.nmi.injected;
1187 env->nmi_pending = events.nmi.pending;
1188 if (events.nmi.masked) {
1189 env->hflags2 |= HF2_NMI_MASK;
1190 } else {
1191 env->hflags2 &= ~HF2_NMI_MASK;
1192 }
1193
1194 env->sipi_vector = events.sipi_vector;
1195#endif
1196
1197 return 0;
1198}
1199
b0b1d690
JK
1200static int kvm_guest_debug_workarounds(CPUState *env)
1201{
1202 int ret = 0;
1203#ifdef KVM_CAP_SET_GUEST_DEBUG
1204 unsigned long reinject_trap = 0;
1205
1206 if (!kvm_has_vcpu_events()) {
1207 if (env->exception_injected == 1) {
1208 reinject_trap = KVM_GUESTDBG_INJECT_DB;
1209 } else if (env->exception_injected == 3) {
1210 reinject_trap = KVM_GUESTDBG_INJECT_BP;
1211 }
1212 env->exception_injected = -1;
1213 }
1214
1215 /*
1216 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
1217 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
1218 * by updating the debug state once again if single-stepping is on.
1219 * Another reason to call kvm_update_guest_debug here is a pending debug
1220 * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
1221 * reinject them via SET_GUEST_DEBUG.
1222 */
1223 if (reinject_trap ||
1224 (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
1225 ret = kvm_update_guest_debug(env, reinject_trap);
1226 }
1227#endif /* KVM_CAP_SET_GUEST_DEBUG */
1228 return ret;
1229}
1230
ff44f1a3
JK
1231static int kvm_put_debugregs(CPUState *env)
1232{
1233#ifdef KVM_CAP_DEBUGREGS
1234 struct kvm_debugregs dbgregs;
1235 int i;
1236
1237 if (!kvm_has_debugregs()) {
1238 return 0;
1239 }
1240
1241 for (i = 0; i < 4; i++) {
1242 dbgregs.db[i] = env->dr[i];
1243 }
1244 dbgregs.dr6 = env->dr[6];
1245 dbgregs.dr7 = env->dr[7];
1246 dbgregs.flags = 0;
1247
1248 return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
1249#else
1250 return 0;
1251#endif
1252}
1253
1254static int kvm_get_debugregs(CPUState *env)
1255{
1256#ifdef KVM_CAP_DEBUGREGS
1257 struct kvm_debugregs dbgregs;
1258 int i, ret;
1259
1260 if (!kvm_has_debugregs()) {
1261 return 0;
1262 }
1263
1264 ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
1265 if (ret < 0) {
1266 return ret;
1267 }
1268 for (i = 0; i < 4; i++) {
1269 env->dr[i] = dbgregs.db[i];
1270 }
1271 env->dr[4] = env->dr[6] = dbgregs.dr6;
1272 env->dr[5] = env->dr[7] = dbgregs.dr7;
1273#endif
1274
1275 return 0;
1276}
1277
ea375f9a 1278int kvm_arch_put_registers(CPUState *env, int level)
05330448
AL
1279{
1280 int ret;
1281
dbaa07c4
JK
1282 assert(cpu_is_stopped(env) || qemu_cpu_self(env));
1283
05330448
AL
1284 ret = kvm_getput_regs(env, 1);
1285 if (ret < 0)
1286 return ret;
1287
f1665b21
SY
1288 ret = kvm_put_xsave(env);
1289 if (ret < 0)
1290 return ret;
1291
1292 ret = kvm_put_xcrs(env);
05330448
AL
1293 if (ret < 0)
1294 return ret;
1295
1296 ret = kvm_put_sregs(env);
1297 if (ret < 0)
1298 return ret;
1299
ea643051 1300 ret = kvm_put_msrs(env, level);
05330448
AL
1301 if (ret < 0)
1302 return ret;
1303
ea643051
JK
1304 if (level >= KVM_PUT_RESET_STATE) {
1305 ret = kvm_put_mp_state(env);
1306 if (ret < 0)
1307 return ret;
1308 }
f8d926e9 1309
ea643051 1310 ret = kvm_put_vcpu_events(env, level);
a0fb002c
JK
1311 if (ret < 0)
1312 return ret;
1313
b0b1d690
JK
1314 /* must be last */
1315 ret = kvm_guest_debug_workarounds(env);
1316 if (ret < 0)
1317 return ret;
1318
ff44f1a3
JK
1319 ret = kvm_put_debugregs(env);
1320 if (ret < 0)
1321 return ret;
1322
05330448
AL
1323 return 0;
1324}
1325
1326int kvm_arch_get_registers(CPUState *env)
1327{
1328 int ret;
1329
dbaa07c4
JK
1330 assert(cpu_is_stopped(env) || qemu_cpu_self(env));
1331
05330448
AL
1332 ret = kvm_getput_regs(env, 0);
1333 if (ret < 0)
1334 return ret;
1335
f1665b21
SY
1336 ret = kvm_get_xsave(env);
1337 if (ret < 0)
1338 return ret;
1339
1340 ret = kvm_get_xcrs(env);
05330448
AL
1341 if (ret < 0)
1342 return ret;
1343
1344 ret = kvm_get_sregs(env);
1345 if (ret < 0)
1346 return ret;
1347
1348 ret = kvm_get_msrs(env);
1349 if (ret < 0)
1350 return ret;
1351
5a2e3c2e
JK
1352 ret = kvm_get_mp_state(env);
1353 if (ret < 0)
1354 return ret;
1355
a0fb002c
JK
1356 ret = kvm_get_vcpu_events(env);
1357 if (ret < 0)
1358 return ret;
1359
ff44f1a3
JK
1360 ret = kvm_get_debugregs(env);
1361 if (ret < 0)
1362 return ret;
1363
05330448
AL
1364 return 0;
1365}
1366
1367int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
1368{
1369 /* Try to inject an interrupt if the guest can accept it */
1370 if (run->ready_for_interrupt_injection &&
1371 (env->interrupt_request & CPU_INTERRUPT_HARD) &&
1372 (env->eflags & IF_MASK)) {
1373 int irq;
1374
1375 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
1376 irq = cpu_get_pic_interrupt(env);
1377 if (irq >= 0) {
1378 struct kvm_interrupt intr;
1379 intr.irq = irq;
1380 /* FIXME: errors */
8c0d577e 1381 DPRINTF("injected interrupt %d\n", irq);
05330448
AL
1382 kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
1383 }
1384 }
1385
1386 /* If we have an interrupt but the guest is not ready to receive an
1387 * interrupt, request an interrupt window exit. This will
1388 * cause a return to userspace as soon as the guest is ready to
1389 * receive interrupts. */
1390 if ((env->interrupt_request & CPU_INTERRUPT_HARD))
1391 run->request_interrupt_window = 1;
1392 else
1393 run->request_interrupt_window = 0;
1394
8c0d577e 1395 DPRINTF("setting tpr\n");
4a942cea 1396 run->cr8 = cpu_get_apic_tpr(env->apic_state);
05330448
AL
1397
1398 return 0;
1399}
1400
1401int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
1402{
1403 if (run->if_flag)
1404 env->eflags |= IF_MASK;
1405 else
1406 env->eflags &= ~IF_MASK;
1407
4a942cea
BS
1408 cpu_set_apic_tpr(env->apic_state, run->cr8);
1409 cpu_set_apic_base(env->apic_state, run->apic_base);
05330448
AL
1410
1411 return 0;
1412}
1413
0af691d7
MT
1414int kvm_arch_process_irqchip_events(CPUState *env)
1415{
1416 if (env->interrupt_request & CPU_INTERRUPT_INIT) {
1417 kvm_cpu_synchronize_state(env);
1418 do_cpu_init(env);
1419 env->exception_index = EXCP_HALTED;
1420 }
1421
1422 if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
1423 kvm_cpu_synchronize_state(env);
1424 do_cpu_sipi(env);
1425 }
1426
1427 return env->halted;
1428}
1429
05330448
AL
1430static int kvm_handle_halt(CPUState *env)
1431{
1432 if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
1433 (env->eflags & IF_MASK)) &&
1434 !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
1435 env->halted = 1;
1436 env->exception_index = EXCP_HLT;
1437 return 0;
1438 }
1439
1440 return 1;
1441}
1442
1443int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
1444{
1445 int ret = 0;
1446
1447 switch (run->exit_reason) {
1448 case KVM_EXIT_HLT:
8c0d577e 1449 DPRINTF("handle_hlt\n");
05330448
AL
1450 ret = kvm_handle_halt(env);
1451 break;
1452 }
1453
1454 return ret;
1455}
e22a25c9
AL
1456
1457#ifdef KVM_CAP_SET_GUEST_DEBUG
e22a25c9
AL
1458int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
1459{
38972938 1460 static const uint8_t int3 = 0xcc;
64bf3f4e 1461
e22a25c9 1462 if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
64bf3f4e 1463 cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
e22a25c9
AL
1464 return -EINVAL;
1465 return 0;
1466}
1467
1468int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
1469{
1470 uint8_t int3;
1471
1472 if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
64bf3f4e 1473 cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
e22a25c9
AL
1474 return -EINVAL;
1475 return 0;
1476}
1477
1478static struct {
1479 target_ulong addr;
1480 int len;
1481 int type;
1482} hw_breakpoint[4];
1483
1484static int nb_hw_breakpoint;
1485
1486static int find_hw_breakpoint(target_ulong addr, int len, int type)
1487{
1488 int n;
1489
1490 for (n = 0; n < nb_hw_breakpoint; n++)
1491 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
1492 (hw_breakpoint[n].len == len || len == -1))
1493 return n;
1494 return -1;
1495}
1496
1497int kvm_arch_insert_hw_breakpoint(target_ulong addr,
1498 target_ulong len, int type)
1499{
1500 switch (type) {
1501 case GDB_BREAKPOINT_HW:
1502 len = 1;
1503 break;
1504 case GDB_WATCHPOINT_WRITE:
1505 case GDB_WATCHPOINT_ACCESS:
1506 switch (len) {
1507 case 1:
1508 break;
1509 case 2:
1510 case 4:
1511 case 8:
1512 if (addr & (len - 1))
1513 return -EINVAL;
1514 break;
1515 default:
1516 return -EINVAL;
1517 }
1518 break;
1519 default:
1520 return -ENOSYS;
1521 }
1522
1523 if (nb_hw_breakpoint == 4)
1524 return -ENOBUFS;
1525
1526 if (find_hw_breakpoint(addr, len, type) >= 0)
1527 return -EEXIST;
1528
1529 hw_breakpoint[nb_hw_breakpoint].addr = addr;
1530 hw_breakpoint[nb_hw_breakpoint].len = len;
1531 hw_breakpoint[nb_hw_breakpoint].type = type;
1532 nb_hw_breakpoint++;
1533
1534 return 0;
1535}
1536
1537int kvm_arch_remove_hw_breakpoint(target_ulong addr,
1538 target_ulong len, int type)
1539{
1540 int n;
1541
1542 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
1543 if (n < 0)
1544 return -ENOENT;
1545
1546 nb_hw_breakpoint--;
1547 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
1548
1549 return 0;
1550}
1551
1552void kvm_arch_remove_all_hw_breakpoints(void)
1553{
1554 nb_hw_breakpoint = 0;
1555}
1556
1557static CPUWatchpoint hw_watchpoint;
1558
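/* Note (added): handle a KVM_EXIT_DEBUG exit. For #DB (exception 1), DR6 bit
 * 14 (BS) marks a single-step trap and DR6 bits 0-3 flag which hardware
 * breakpoint fired; the matching DR7 R/W field selects break/write/access.
 * Exceptions QEMU does not consume are re-injected into the guest below. */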
1559int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
1560{
1561 int handle = 0;
1562 int n;
1563
1564 if (arch_info->exception == 1) {
1565 if (arch_info->dr6 & (1 << 14)) {
1566 if (cpu_single_env->singlestep_enabled)
1567 handle = 1;
1568 } else {
1569 for (n = 0; n < 4; n++)
1570 if (arch_info->dr6 & (1 << n))
1571 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
1572 case 0x0:
1573 handle = 1;
1574 break;
1575 case 0x1:
1576 handle = 1;
1577 cpu_single_env->watchpoint_hit = &hw_watchpoint;
1578 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
1579 hw_watchpoint.flags = BP_MEM_WRITE;
1580 break;
1581 case 0x3:
1582 handle = 1;
1583 cpu_single_env->watchpoint_hit = &hw_watchpoint;
1584 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
1585 hw_watchpoint.flags = BP_MEM_ACCESS;
1586 break;
1587 }
1588 }
1589 } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
1590 handle = 1;
1591
b0b1d690
JK
1592 if (!handle) {
1593 cpu_synchronize_state(cpu_single_env);
1594 assert(cpu_single_env->exception_injected == -1);
1595
1596 cpu_single_env->exception_injected = arch_info->exception;
1597 cpu_single_env->has_error_code = 0;
1598 }
e22a25c9
AL
1599
1600 return handle;
1601}
1602
1603void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
1604{
1605 const uint8_t type_code[] = {
1606 [GDB_BREAKPOINT_HW] = 0x0,
1607 [GDB_WATCHPOINT_WRITE] = 0x1,
1608 [GDB_WATCHPOINT_ACCESS] = 0x3
1609 };
1610 const uint8_t len_code[] = {
1611 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
1612 };
1613 int n;
1614
1615 if (kvm_sw_breakpoints_active(env))
1616 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
1617
1618 if (nb_hw_breakpoint > 0) {
1619 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
1620 dbg->arch.debugreg[7] = 0x0600;
1621 for (n = 0; n < nb_hw_breakpoint; n++) {
1622 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
1623 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
1624 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
1625 (len_code[hw_breakpoint[n].len] << (18 + n*4));
1626 }
1627 }
f1665b21
SY
1628 /* Legal xcr0 for loading */
1629 env->xcr0 = 1;
e22a25c9
AL
1630}
1631#endif /* KVM_CAP_SET_GUEST_DEBUG */
4513d923
GN
1632
1633bool kvm_arch_stop_on_emulation_error(CPUState *env)
1634{
1635 return !(env->cr[0] & CR0_PE_MASK) ||
1636 ((env->segs[R_CS].selector & 3) != 3);
1637}
1638
c0532a76
MT
1639static void hardware_memory_error(void)
1640{
1641 fprintf(stderr, "Hardware memory error!\n");
1642 exit(1);
1643}
1644
f71ac88f
HS
1645#ifdef KVM_CAP_MCE
1646static void kvm_mce_broadcast_rest(CPUState *env)
1647{
1648 CPUState *cenv;
1649 int family, model, cpuver = env->cpuid_version;
1650
1651 family = (cpuver >> 8) & 0xf;
1652 model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0xf);
1653
1654 /* Broadcast MCA signal for CPUs of signature 06_0EH (family 6, model >= 14) and above */
1655 if ((family == 6 && model >= 14) || family > 6) {
1656 for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
1657 if (cenv == env) {
1658 continue;
1659 }
1660 kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC,
1661 MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, 1);
1662 }
1663 }
1664}
1665#endif
1666
c0532a76
MT
1667int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
1668{
1669#if defined(KVM_CAP_MCE)
1670 struct kvm_x86_mce mce = {
1671 .bank = 9,
1672 };
1673 void *vaddr;
1674 ram_addr_t ram_addr;
1675 target_phys_addr_t paddr;
1676 int r;
1677
1678 if ((env->mcg_cap & MCG_SER_P) && addr
1679 && (code == BUS_MCEERR_AR
1680 || code == BUS_MCEERR_AO)) {
1681 if (code == BUS_MCEERR_AR) {
1682 /* Fake an Intel architectural Data Load SRAR UCR */
1683 mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
1684 | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
1685 | MCI_STATUS_AR | 0x134;
1686 mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
1687 mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
1688 } else {
1689 /*
1690 * If there is an MCE exception being processed, ignore
1691 * this SRAO MCE
1692 */
1693 r = kvm_mce_in_exception(env);
1694 if (r == -1) {
1695 fprintf(stderr, "Failed to get MCE status\n");
1696 } else if (r) {
1697 return 0;
1698 }
1699 /* Fake an Intel architectural Memory scrubbing UCR */
1700 mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
1701 | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
1702 | 0xc0;
1703 mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
1704 mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
1705 }
1706 vaddr = (void *)addr;
1707 if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
1708 !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr, &paddr)) {
1709 fprintf(stderr, "Hardware memory error for memory used by "
1710 "QEMU itself instead of guest system!\n");
1711 /* Hope we are lucky for AO MCE */
1712 if (code == BUS_MCEERR_AO) {
1713 return 0;
1714 } else {
1715 hardware_memory_error();
1716 }
1717 }
1718 mce.addr = paddr;
1719 r = kvm_set_mce(env, &mce);
1720 if (r < 0) {
1721 fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
1722 abort();
1723 }
f71ac88f 1724 kvm_mce_broadcast_rest(env);
c0532a76
MT
1725 } else
1726#endif
1727 {
1728 if (code == BUS_MCEERR_AO) {
1729 return 0;
1730 } else if (code == BUS_MCEERR_AR) {
1731 hardware_memory_error();
1732 } else {
1733 return 1;
1734 }
1735 }
1736 return 0;
1737}
1738
1739int kvm_on_sigbus(int code, void *addr)
1740{
1741#if defined(KVM_CAP_MCE)
1742 if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
1743 uint64_t status;
1744 void *vaddr;
1745 ram_addr_t ram_addr;
1746 target_phys_addr_t paddr;
c0532a76
MT
1747
1748 /* Hope we are lucky for AO MCE */
1749 vaddr = addr;
1750 if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
1751 !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) {
1752 fprintf(stderr, "Hardware memory error for memory used by "
1753 "QEMU itself instead of guest system!: %p\n", addr);
1754 return 0;
1755 }
1756 status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
1757 | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
1758 | 0xc0;
1759 kvm_inject_x86_mce(first_cpu, 9, status,
1760 MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr,
1761 (MCM_ADDR_PHYS << 6) | 0xc, 1);
f71ac88f 1762 kvm_mce_broadcast_rest(first_cpu);
c0532a76
MT
1763 } else
1764#endif
1765 {
1766 if (code == BUS_MCEERR_AO) {
1767 return 0;
1768 } else if (code == BUS_MCEERR_AR) {
1769 hardware_memory_error();
1770 } else {
1771 return 1;
1772 }
1773 }
1774 return 0;
1775}