/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "kvm_i386.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"
#include "hyperv.h"
#include "hw/pci.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_deadline;
static bool has_msr_async_pf_en;
static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
static int lm_capable_kernel;

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

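/*
 * KVM_GET_SUPPORTED_CPUID reports in cpuid->nent how many entries it wrote
 * and fails when the buffer is too small, so the caller cannot know the
 * required size up front.  try_get_cpuid() therefore returns NULL on E2BIG,
 * letting kvm_arch_get_supported_cpuid() below retry with a doubled buffer
 * until the whole list fits.
 */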
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

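/*
 * Before the KVM_CPUID_FEATURES leaf existed, paravirtual features were
 * only discoverable as individual KVM_CAP_* extensions.  This table maps
 * those capabilities to the matching feature bits so get_para_features()
 * can synthesize the leaf on such older kernels (see the fallback at the
 * end of kvm_arch_get_supported_cpuid()).
 */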
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
    { -1, -1 }
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}


uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    int has_kvm_features = 0;

    max = 1;
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            if (cpuid->entries[i].function == KVM_CPUID_FEATURES) {
                has_kvm_features = 1;
            }
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
                    break;
                }
                break;
            }
        }
    }

    g_free(cpuid);

    /* fallback for older kernels */
    if (!has_kvm_features && (function == KVM_CPUID_FEATURES)) {
        ret = get_para_features(s);
    }

    return ret;
}

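/*
 * Guest pages that the host has marked as hardware-poisoned are remembered
 * in this list so that a guest reset can remap them with fresh usable RAM;
 * kvm_unpoison_all() is registered as a reset handler in kvm_arch_init().
 */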
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_malloc(sizeof(HWPoisonPage));
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

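/*
 * Translate a SIGBUS code into a machine-check event for the guest.  An
 * action-required fault (BUS_MCEERR_AR) is reported as a data-load error,
 * anything else as an action-optional memory-scrubbing error; the 0x134
 * and 0xc0 constants below appear to be the corresponding architectural
 * MCA error codes.
 */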
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    ram_addr_t ram_addr;
    hwaddr paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_host(env->kvm_state, addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(cpu, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        hwaddr paddr;

        /* Hope we are lucky for AO MCE */
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_host(first_cpu->kvm_state, addr,
                                                &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(x86_env_get_cpu(first_cpu), paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

static int kvm_inject_mce_oldstyle(CPUX86State *env)
{
    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

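/*
 * Build the full CPUID table for this VCPU and install it with
 * KVM_SET_CPUID2: first the paravirtual leaves (KVM or Hyper-V signature
 * and features), then the standard, extended and Centaur ranges, each
 * filtered against what the host kernel actually supports.  MCE
 * capabilities and the TSC frequency are configured on the way.
 */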
int kvm_arch_init_vcpu(CPUX86State *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } QEMU_PACKED cpuid_data;
    KVMState *s = env->kvm_state;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int r;

    env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    j = env->cpuid_ext_features & CPUID_EXT_TSC_DEADLINE_TIMER;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;
    if (j && kvm_irqchip_in_kernel() &&
        kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
        env->cpuid_ext_features |= CPUID_EXT_TSC_DEADLINE_TIMER;
    }

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(s, 0x8000000A,
                                                            0, R_EDX);

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    if (!hyperv_enabled()) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c->eax = 0;
    } else {
        memcpy(signature, "Microsoft Hv", 12);
        c->eax = HYPERV_CPUID_MIN;
    }
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features &
        kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);

    if (hyperv_enabled()) {
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_FEATURES;
        if (hyperv_relaxed_timing_enabled()) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        }
        if (hyperv_vapic_recommended()) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
        }

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
        if (hyperv_relaxed_timing_enabled()) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
        if (hyperv_vapic_recommended()) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
        c->ebx = hyperv_get_spinlock_retries();

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = KVM_CPUID_SIGNATURE_NEXT;
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c->eax = 0;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];
    }

    has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);

    has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 until all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        env->cpuid_ext4_features &=
            kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks > MCE_BANKS_DEF) {
            banks = MCE_BANKS_DEF;
        }
        mcg_cap &= MCE_CAP_DEF;
        mcg_cap |= banks;
        ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }

        env->mcg_cap = mcg_cap;
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_check_extension(env->kvm_state, KVM_CAP_TSC_CONTROL);
    if (r && env->tsc_khz) {
        r = kvm_vcpu_ioctl(env, KVM_SET_TSC_KHZ, env->tsc_khz);
        if (r < 0) {
            fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
            return r;
        }
    }

    if (kvm_has_xsave()) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }

    return 0;
}

void kvm_arch_reset_vcpu(CPUX86State *env)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

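/*
 * KVM_GET_MSR_INDEX_LIST is a two-step protocol: a first call with
 * nmsrs = 0 fails with E2BIG but fills in the required count, and a second
 * call fetches the actual indices.  The list is only scanned to set the
 * has_msr_* flags above for MSRs that not every kernel exposes.
 */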
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}

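/*
 * Machine-wide KVM initialization.  Note the guest-physical layout set up
 * below: one page for the EPT identity map at identity_base, the TSS in
 * the three pages above it, hence the 4-page (0x4000) e820 reservation.
 * The kvm_shadow_mem machine option is given in bytes but handed to
 * KVM_SET_NR_MMU_PAGES as a number of 4K pages.
 */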
int kvm_arch_init(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    if (!QTAILQ_EMPTY(&list->head)) {
        shadow_mem = qemu_opt_get_size(QTAILQ_FIRST(&list->head),
                                       "kvm_shadow_mem", -1);
        if (shadow_mem != -1) {
            shadow_mem /= 4096;
            ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}

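/*
 * QEMU caches segment descriptor attributes packed into the DESC_* flag
 * format, while struct kvm_segment keeps one field per attribute.  The
 * helpers below convert in both directions; set_v8086_seg() instead forces
 * the fixed attributes that vm86-mode segments always carry (data segment,
 * DPL 3, present).
 */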
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(CPUX86State *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(CPUX86State *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

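/*
 * Offsets into kvm_xsave.region[], counted in 32-bit words.  They follow
 * the architectural XSAVE image: the legacy FXSAVE area (FCW/FSW at byte 0,
 * FTW/FOP at byte 4, XMM registers at byte 160), the XSTATE_BV header word
 * at byte 512, and the AVX high halves at byte 576.
 */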
#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144

static int kvm_put_xsave(CPUX86State *env)
{
    struct kvm_xsave *xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    int i, r;

    if (!kvm_has_xsave()) {
        return kvm_put_fpu(env);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
    xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
    memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
    memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
           sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
           sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
           sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    return r;
}

static int kvm_put_xcrs(CPUX86State *env)
{
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(CPUX86State *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
            (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

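/*
 * Write guest MSRs back with a single KVM_SET_MSRS call.  The entries[100]
 * scratch array must stay big enough for every MSR conditionally appended
 * below; `level` limits what gets written, e.g. the TSC and paravirtual
 * MSRs are only transferred on reset or full-state updates.
 */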
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

static int kvm_put_msrs(CPUX86State *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is not yet able to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
    }
    /*
     * The following paravirtual MSRs have side effects on the guest or are
     * too heavy for normal writeback. Limit them to reset or full state
     * updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
        if (has_msr_pv_eoi_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
                              env->pv_eoi_en_msr);
        }
        if (hyperv_hypercall_available()) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID, 0);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL, 0);
        }
        if (hyperv_vapic_recommended()) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE, 0);
        }
    }
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUX86State *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(CPUX86State *env)
{
    struct kvm_xsave *xsave = env->kvm_xsave_buf;
    int ret, i;
    uint16_t cwd, swd, twd;

    if (!kvm_has_xsave()) {
        return kvm_get_fpu(env);
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }

    cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
    swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
    twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
    env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
    memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
           sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
           sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
           sizeof env->ymmh_regs);
    return 0;
}

static int kvm_get_xcrs(CPUX86State *env)
{
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    }
    return 0;
}

static int kvm_get_sregs(CPUX86State *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

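/*
 * Read guest MSRs from the kernel.  The index list built here must mirror
 * the one in kvm_put_msrs(); KVM_GET_MSRS returns the number of entries it
 * actually filled in, which the switch below uses to demultiplex them.
 */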
static int kvm_get_msrs(CPUX86State *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    msrs[n++].index = MSR_PAT;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }
    if (has_msr_tsc_deadline) {
        msrs[n++].index = MSR_IA32_TSCDEADLINE;
    }
    if (has_msr_misc_enable) {
        msrs[n++].index = MSR_IA32_MISC_ENABLE;
    }

    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }
    if (has_msr_pv_eoi_en) {
        msrs[n++].index = MSR_KVM_PV_EOI_EN;
    }

    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        }
    }

    return 0;
}

static int kvm_put_mp_state(CPUX86State *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        env->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_get_apic(CPUX86State *env)
{
    DeviceState *apic = env->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}

static int kvm_put_apic(CPUX86State *env)
{
    DeviceState *apic = env->apic_state;
    struct kvm_lapic_state kapic;

    if (apic && kvm_irqchip_in_kernel()) {
        kvm_put_apic_state(apic, &kapic);

        return kvm_vcpu_ioctl(env, KVM_SET_LAPIC, &kapic);
    }
    return 0;
}

static int kvm_put_vcpu_events(CPUX86State *env, int level)
{
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;
    events.exception.pad = 0;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
    events.nmi.pad = 0;

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
}

static int kvm_get_vcpu_events(CPUX86State *env)
{
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_guest_debug_workarounds(CPUX86State *env)
{
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject it via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(CPUX86State *env)
{
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(CPUX86State *env)
{
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

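/*
 * Push all of QEMU's CPU state into the kernel.  Ordering matters in two
 * places, as the inline comments note: old-style MCE injection has to
 * precede kvm_put_msrs() so the MCE banks it fills are written out, and
 * the guest-debug workaround runs last because it may rewrite debug state
 * established earlier.
 */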
int kvm_arch_put_registers(CPUX86State *env, int level)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(env, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0) {
            return ret;
        }
        ret = kvm_put_apic(env);
        if (ret < 0) {
            return ret;
        }
    }
    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUX86State *env)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int ret;

    assert(cpu_is_stopped(CPU(cpu)) || qemu_cpu_is_self(CPU(cpu)));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_apic(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

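/*
 * Called on the VCPU thread immediately before reentering the guest.
 * NMIs are always injected here; everything else (PIC interrupts,
 * interrupt-window requests, TPR propagation) only applies with a
 * userspace irqchip, since an in-kernel irqchip performs its own
 * injection.
 */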
void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
{
    int ret;

    /* Inject NMI */
    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        ret = kvm_vcpu_ioctl(env, KVM_NMI);
        if (ret < 0) {
            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                    strerror(-ret));
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Force the VCPU out of its inner loop to process any INIT requests
         * or pending TPR access reports. */
        if (env->interrupt_request &
            (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
            env->exit_request = 1;
        }

        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (env->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(env->apic_state);
    }
}

void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
{
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);
}

int kvm_arch_process_async_events(CPUX86State *env)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    if (env->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        env->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(env);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            env->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        env->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (env->interrupt_request & CPU_INTERRUPT_POLL) {
        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(env->apic_state);
    }
    if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 0;
    }
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(cpu);
    }
    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(cpu);
    }
    if (env->interrupt_request & CPU_INTERRUPT_TPR) {
        env->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(env);
        apic_handle_tpr_access_report(env->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return env->halted;
}

static int kvm_handle_halt(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(CPUX86State *env)
{
    struct kvm_run *run = env->kvm_run;

    apic_handle_tpr_access_report(env->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

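/*
 * The hardware breakpoint slots are mirrored into the architectural debug
 * registers: DR7 gets a global-enable bit per slot (bit 2n+1) plus a 2-bit
 * type and 2-bit length field per slot starting at bit 16 + 4n, while DR6
 * reports which slot fired (bits 0-3) or a single-step trap (bit 14).
 * kvm_handle_debug() decodes DR6 on a debug exit and
 * kvm_arch_update_guest_debug() performs the DR7 encoding.
 */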
static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
{
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n * 4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        /* pass to guest */
        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return ret;
}

void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n * 4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n * 4));
        }
    }
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(cpu);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        ret = kvm_handle_tpr_access(env);
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on older Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        ret = kvm_handle_debug(&run->debug.arch);
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUX86State *env)
{
    kvm_cpu_synchronize_state(env);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_irqfds_allowed = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;
}

/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}

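/*
 * The assigned_dev_id computed above packs the PCI address as
 * domain << 16 | bus << 8 | devfn; it is the handle that every later
 * assignment ioctl (IRQ setup, MSI-X configuration, deassignment) uses to
 * name the device.
 */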
int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}

static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}

int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                              KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                                KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                              KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                                KVM_DEV_IRQ_HOST_MSIX);
}