/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "vmx.h"
#include "segment_descriptor.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	int launched;
	struct kvm_msr_entry *guest_msrs;
	struct kvm_msr_entry *host_msrs;
	int nmsrs;
	int save_nmsrs;
	int msr_offset_efer;
#ifdef CONFIG_X86_64
	int msr_offset_kernel_gs_base;
#endif
	struct vmcs *vmcs;
	struct {
		int loaded;
		u16 fs_sel, gs_sel, ldt_sel;
		int fs_gs_ldt_reload_needed;
	} host_state;
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static int init_rmode_tss(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;

#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

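/*
 * Load/save a block of MSRs to/from a kvm_msr_entry array; used to
 * swap host and guest MSR values around guest entry and exit.
 */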
static void load_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}

static void save_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}

static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
{
	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
}

static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int efer_offset = vmx->msr_offset_efer;
	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
}

static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx->guest_msrs[i].index == msr)
			return i;
	return -1;
}

static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	i = __find_msr_index(vcpu, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

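/*
 * VMCLEAR flushes the VMCS at the given physical address back to
 * memory and makes it inactive and not-current on this cpu.
 */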
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static void __vcpu_clear(void *arg)
{
	struct kvm_vcpu *vcpu = arg;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu = raw_smp_processor_id();

	if (vcpu->cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vcpu->host_tsc);
}

static void vcpu_clear(struct kvm_vcpu *vcpu)
{
	if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
		smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
	else
		__vcpu_clear(vcpu);
	to_vmx(vcpu)->launched = 0;
}

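/*
 * VMCS field accessors.  Fields are read with VMREAD and written with
 * VMWRITE on this cpu's current VMCS; the 16/32/64-bit variants are
 * thin wrappers around the natural-width ones.
 */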
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (ASM_VMX_VMREAD_RDX_RAX
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
		      : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
	vmcs_writel(field, value);
#else
	vmcs_writel(field, value);
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = 1u << PF_VECTOR;
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	if (vcpu->guest_debug.enabled)
		eb |= 1u << 1;		/* #DB, for guest debugging */
	if (vcpu->rmode.active)
		eb = ~0;		/* real mode: intercept everything */
	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void reload_tss(void)
{
#ifndef CONFIG_X86_64
	/*
	 * VT restores TR but not its size.  Useless.  The descriptor
	 * must be marked available again before ltr will accept it.
	 */
	struct descriptor_table gdt;
	struct segment_descriptor *descs;

	get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
#endif
}

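/*
 * Switch MSR_EFER to the guest's value, but only for the bits in
 * EFER_SAVE_RESTORE_BITS (currently just EFER_SCE); everything else
 * keeps the host's setting.
 */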
static void load_transition_efer(struct kvm_vcpu *vcpu)
{
	u64 trans_efer;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int efer_offset = vmx->msr_offset_efer;

	trans_efer = vmx->host_msrs[efer_offset].data;
	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
	wrmsrl(MSR_EFER, trans_efer);
	vcpu->stat.efer_reload++;
}

static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = read_ldt();
	vmx->host_state.fs_gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = read_fs();
	if (!(vmx->host_state.fs_sel & 7))
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
	else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_gs_ldt_reload_needed = 1;
	}
	vmx->host_state.gs_sel = read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.fs_gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	if (is_long_mode(vcpu)) {
		save_msrs(vmx->host_msrs +
			  vmx->msr_offset_kernel_gs_base, 1);
	}
#endif
	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	if (msr_efer_need_save_restore(vcpu))
		load_transition_efer(vcpu);
}

static void vmx_load_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_gs_ldt_reload_needed) {
		load_ldt(vmx->host_state.ldt_sel);
		load_fs(vmx->host_state.fs_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_disable();
		load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_enable();

		reload_tss();
	}
	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
	if (msr_efer_need_save_restore(vcpu))
		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	int cpu;
	u64 tsc_this, delta;

	cpu = get_cpu();

	if (vcpu->cpu != cpu)
		vcpu_clear(vcpu);

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		vcpu->cpu = cpu;
		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
		get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonic.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_load_host_state(vcpu);
	kvm_put_guest_fpu(vcpu);
	put_cpu();
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
	vcpu_clear(vcpu);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;
	u32 interruptibility;

	rip = vmcs_readl(GUEST_RIP);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs_writel(GUEST_RIP, rip);

	/*
	 * We emulated an instruction, so temporary interrupt blocking
	 * should be removed, if set.
	 */
	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	if (interruptibility & 3)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     interruptibility & ~3);
	vcpu->interrupt_window_open = 1;
}

static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
	       vmcs_readl(GUEST_RIP));
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     GP_VECTOR |
		     INTR_TYPE_EXCEPTION |
		     INTR_INFO_DELIEVER_CODE_MASK |
		     INTR_INFO_VALID_MASK);
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
	tmp = vmx->host_msrs[to];
	vmx->host_msrs[to] = vmx->host_msrs[from];
	vmx->host_msrs[from] = tmp;
}

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int save_nmsrs;

	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(vcpu)) {
		int index;

		index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vcpu, index, save_nmsrs++);
		index = __find_msr_index(vcpu, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vcpu, index, save_nmsrs++);
		index = __find_msr_index(vcpu, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vcpu, index, save_nmsrs++);
		index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
		if (index >= 0)
			move_msr_up(vcpu, index, save_nmsrs++);
		/*
		 * MSR_K6_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vcpu, MSR_K6_STAR);
		if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
			move_msr_up(vcpu, index, save_nmsrs++);
	}
#endif
	vmx->save_nmsrs = save_nmsrs;

#ifdef CONFIG_X86_64
	vmx->msr_offset_kernel_gs_base =
		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
#endif
	vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
}

/*
 * Reads and returns the guest's time-stamp counter "register":
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * Writes 'guest_tsc' into the guest's time-stamp counter "register":
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
	u64 host_tsc;

	rdtscll(host_tsc);
	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct kvm_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
	case MSR_IA32_TIME_STAMP_COUNTER:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	default:
		msr = find_msr_entry(vcpu, msr_index);
		if (msr) {
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr;
	int ret = 0;

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		if (vmx->host_state.loaded)
			load_transition_efer(vcpu);
		break;
	case MSR_FS_BASE:
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmcs_writel(GUEST_GS_BASE, data);
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TIME_STAMP_COUNTER:
		guest_write_tsc(data);
		break;
	default:
		msr = find_msr_entry(vcpu, msr_index);
		if (msr) {
			msr->data = data;
			if (vmx->host_state.loaded)
				load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}

/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
{
	vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
	vcpu->rip = vmcs_readl(GUEST_RIP);
}

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
	vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
	vmcs_writel(GUEST_RIP, vcpu->rip);
}

static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	unsigned long dr7 = 0x400;
	int old_singlestep;

	old_singlestep = vcpu->guest_debug.singlestep;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		int i;

		dr7 |= 0x200;  /* exact */
		for (i = 0; i < 4; ++i) {
			if (!dbg->breakpoints[i].enabled)
				continue;
			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			dr7 |= 2 << (i*2);    /* global enable */
			dr7 |= 0 << (i*4+16); /* execution breakpoint */
		}

		vcpu->guest_debug.singlestep = dbg->singlestep;
	} else
		vcpu->guest_debug.singlestep = 0;

	if (old_singlestep && !vcpu->guest_debug.singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		vmcs_writel(GUEST_RFLAGS, flags);
	}

	update_exception_bitmap(vcpu);
	vmcs_writel(GUEST_DR7, dr7);

	return 0;
}

static __init int cpu_has_kvm_support(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    == MSR_IA32_FEATURE_CONTROL_LOCKED;
	/* locked but not enabled */
}

static void hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    != (MSR_IA32_FEATURE_CONTROL_LOCKED |
		MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
		       MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory", "cc");
}

static void hardware_disable(void *garbage)
{
	asm volatile (ASM_VMX_VMXOFF : : : "cc");
}

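/*
 * Compute a usable control-field value from the required (min) and
 * optional (opt) bits, honouring the allowed-0/allowed-1 settings the
 * CPU reports in the corresponding VMX capability MSR.
 */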
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -1;

	*result = ctl;
	return 0;
}

static __init int setup_vmcs_config(void)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -1;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING;
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -1;

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -1;

	min = opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -1;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -1;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -1;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -1;

	vmcs_config.size = vmx_msr_high & 0x1fff;
	vmcs_config.order = get_order(vmcs_config.size);
	vmcs_config.revision_id = vmx_msr_low;

	vmcs_config.pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_config.cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_config.vmexit_ctrl = _vmexit_control;
	vmcs_config.vmentry_ctrl = _vmentry_control;

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		free_vmcs(per_cpu(vmxarea, cpu));
}

extern struct vmcs *alloc_vmcs_cpu(int cpu);

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static __init int hardware_setup(void)
{
	if (setup_vmcs_config() < 0)
		return -1;
	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}

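/*
 * Real-mode guests run in vm86 mode, so the segment registers are
 * rewritten when switching modes: fix_rmode_seg() below saves the
 * protected-mode state, and fix_pmode_dataseg() restores it on the way
 * back if it still looks intact, otherwise it synthesizes a flat
 * writable data segment.
 */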
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
	} else {
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
	}
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->rmode.active = 0;

	vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
	flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

static int rmode_tss_base(struct kvm *kvm)
{
	/* The vm86 TSS lives in the last three pages of the first memslot. */
	gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
	return base_gfn << PAGE_SHIFT;
}

static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	save->selector = vmcs_read16(sf->selector);
	save->base = vmcs_readl(sf->base);
	save->limit = vmcs_read32(sf->limit);
	save->ar = vmcs_read32(sf->ar_bytes);
	vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);
}

static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->rmode.active = 1;

	vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;

	flags |= IOPL_MASK | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);

	init_rmode_tss(vcpu->kvm);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
		       __FUNCTION__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}

	vcpu->shadow_efer |= EFER_LMA;

	find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     | VM_ENTRY_CONTROLS_IA32E_MASK);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vcpu->shadow_efer &= ~EFER_LMA;

	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_CONTROLS_IA32E_MASK);
}

#endif

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
	vcpu->cr4 &= KVM_GUEST_CR4_MASK;
	vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	vmx_fpu_deactivate(vcpu);

	if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
		enter_pmode(vcpu);

	if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0,
		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
	vcpu->cr0 = cr0;

	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
		vmx_fpu_activate(vcpu);
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	vmcs_writel(GUEST_CR3, cr3);
	if (vcpu->cr0 & X86_CR0_PE)
		vmx_fpu_deactivate(vcpu);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
	vcpu->cr4 = cr4;
}

#ifdef CONFIG_X86_64

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);

	vcpu->shadow_efer = efer;
	if (efer & EFER_LMA) {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) |
			     VM_ENTRY_CONTROLS_IA32E_MASK);
		msr->data = efer;
	} else {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) &
			     ~VM_ENTRY_CONTROLS_IA32E_MASK);

		msr->data = efer & ~EFER_LME;
	}
	setup_msrs(vcpu);
}

#endif

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	var->base = vmcs_readl(sf->base);
	var->limit = vmcs_read32(sf->limit);
	var->selector = vmcs_read16(sf->selector);
	ar = vmcs_read32(sf->ar_bytes);
	if (ar & AR_UNUSABLE_MASK)
		ar = 0;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	var->present = (ar >> 7) & 1;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
	var->unusable = (ar >> 16) & 1;
}

static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
	u32 ar;

	if (var->unusable)
		ar = 1 << 16;
	else {
		ar = var->type & 15;
		ar |= (var->s & 1) << 4;
		ar |= (var->dpl & 3) << 5;
		ar |= (var->present & 1) << 7;
		ar |= (var->avl & 1) << 12;
		ar |= (var->l & 1) << 13;
		ar |= (var->db & 1) << 14;
		ar |= (var->g & 1) << 15;
	}
	if (ar == 0) /* a 0 value means unusable */
		ar = AR_UNUSABLE_MASK;

	return ar;
}

static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
		vcpu->rmode.tr.selector = var->selector;
		vcpu->rmode.tr.base = var->base;
		vcpu->rmode.tr.limit = var->limit;
		vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
		return;
	}
	vmcs_writel(sf->base, var->base);
	vmcs_write32(sf->limit, var->limit);
	vmcs_write16(sf->selector, var->selector);
	if (vcpu->rmode.active && var->s) {
		/*
		 * Hack real-mode segments into vm86 compatibility.
		 */
		if (var->base == 0xffff0000 && var->selector == 0xf000)
			vmcs_writel(sf->base, 0xf0000);
		ar = 0xf3;
	} else
		ar = vmx_segment_access_rights(var);
	vmcs_write32(sf->ar_bytes, ar);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_IDTR_BASE, dt->base);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_GDTR_BASE, dt->base);
}

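/*
 * Set up the three pages of the vm86 TSS: a minimal TSS whose I/O map
 * base points past the TSS itself, with the I/O bitmap cleared (all
 * ports allowed) and its final pad byte set to all ones, as the SDM
 * requires for the byte past the bitmap.
 */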
static int init_rmode_tss(struct kvm *kvm)
{
	struct page *p1, *p2, *p3;
	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
	char *page;

	p1 = gfn_to_page(kvm, fn++);
	p2 = gfn_to_page(kvm, fn++);
	p3 = gfn_to_page(kvm, fn);

	if (!p1 || !p2 || !p3) {
		kvm_printf(kvm, "%s: gfn_to_page failed\n", __FUNCTION__);
		return 0;
	}

	page = kmap_atomic(p1, KM_USER0);
	clear_page(page);
	*(u16 *)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	kunmap_atomic(page, KM_USER0);

	page = kmap_atomic(p2, KM_USER0);
	clear_page(page);
	kunmap_atomic(page, KM_USER0);

	page = kmap_atomic(p3, KM_USER0);
	clear_page(page);
	*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
	kunmap_atomic(page, KM_USER0);

	return 1;
}

static void seg_setup(int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0x93);
}

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 host_sysenter_cs;
	u32 junk;
	unsigned long a;
	struct descriptor_table dt;
	int i;
	int ret = 0;
	unsigned long kvm_vmx_return;

	if (!init_rmode_tss(vcpu->kvm)) {
		ret = -ENOMEM;
		goto out;
	}

	memset(vcpu->regs, 0, sizeof(vcpu->regs));
	vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
	vcpu->cr8 = 0;
	vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (vcpu->vcpu_id == 0)
		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;

	fx_init(vcpu);

	/*
	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
	 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
	 */
	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
	vmcs_writel(GUEST_CS_BASE, 0x000f0000);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);

	seg_setup(VCPU_SREG_DS);
	seg_setup(VCPU_SREG_ES);
	seg_setup(VCPU_SREG_FS);
	seg_setup(VCPU_SREG_GS);
	seg_setup(VCPU_SREG_SS);

	vmcs_write16(GUEST_TR_SELECTOR, 0);
	vmcs_writel(GUEST_TR_BASE, 0);
	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
	vmcs_writel(GUEST_LDTR_BASE, 0);
	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

	vmcs_write32(GUEST_SYSENTER_CS, 0);
	vmcs_writel(GUEST_SYSENTER_ESP, 0);
	vmcs_writel(GUEST_SYSENTER_EIP, 0);

	vmcs_writel(GUEST_RFLAGS, 0x02);
	vmcs_writel(GUEST_RIP, 0xfff0);
	vmcs_writel(GUEST_RSP, 0);

	/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
	vmcs_writel(GUEST_DR7, 0x400);

	vmcs_writel(GUEST_GDTR_BASE, 0);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

	vmcs_writel(GUEST_IDTR_BASE, 0);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

	vmcs_write32(GUEST_ACTIVITY_STATE, 0);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

	/* I/O */
	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));

	guest_write_tsc(0);

	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

	/* Special registers */
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	/* Control */
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
		     vmcs_config.pin_based_exec_ctrl);
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
		     vmcs_config.cpu_based_exec_ctrl);

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

	get_idt(&dt);
	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */

	asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
	rdmsrl(MSR_IA32_SYSENTER_ESP, a);
	vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
	rdmsrl(MSR_IA32_SYSENTER_EIP, a);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */

	for (i = 0; i < NR_VMX_MSR; ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		u64 data;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
		data = data_low | ((u64)data_high << 32);
		vmx->host_msrs[j].index = index;
		vmx->host_msrs[j].reserved = 0;
		vmx->host_msrs[j].data = data;
		vmx->guest_msrs[j] = vmx->host_msrs[j];
		++vmx->nmsrs;
	}

	setup_msrs(vcpu);

	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);

	/* 22.2.1, 20.8.1 */
	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

#ifdef CONFIG_X86_64
	vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
	vmcs_writel(TPR_THRESHOLD, 0);
#endif

	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

	vcpu->cr0 = 0x60000010;
	vmx_set_cr0(vcpu, vcpu->cr0); /* enter rmode */
	vmx_set_cr4(vcpu, 0);
#ifdef CONFIG_X86_64
	vmx_set_efer(vcpu, 0);
#endif
	vmx_fpu_activate(vcpu);
	update_exception_bitmap(vcpu);

	return 0;

out:
	return ret;
}

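/*
 * Deliver an interrupt to a real-mode (vm86) guest by hand: look the
 * vector up in the IVT at address 0, push flags/cs/ip on the guest
 * stack, and redirect cs:ip to the handler, as the CPU itself would
 * in real mode.
 */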
static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
{
	u16 ent[2];
	u16 cs;
	u16 ip;
	unsigned long flags;
	unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
	u16 sp = vmcs_readl(GUEST_RSP);
	u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);

	if (sp > ss_limit || sp < 6) {
		vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
			    __FUNCTION__,
			    vmcs_readl(GUEST_RSP),
			    vmcs_readl(GUEST_SS_BASE),
			    vmcs_read32(GUEST_SS_LIMIT));
		return;
	}

	if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
							sizeof(ent)) {
		vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
		return;
	}

	flags = vmcs_readl(GUEST_RFLAGS);
	cs = vmcs_readl(GUEST_CS_BASE) >> 4;
	ip = vmcs_readl(GUEST_RIP);

	if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
	    kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
	    kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
		vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
		return;
	}

	vmcs_writel(GUEST_RFLAGS, flags &
		    ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
	vmcs_write16(GUEST_CS_SELECTOR, ent[1]);
	vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
	vmcs_writel(GUEST_RIP, ent[0]);
	vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
}

static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);

	if (vcpu->rmode.active) {
		inject_rmode_irq(vcpu, irq);
		return;
	}
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	u32 cpu_based_vm_exec_control;

	vcpu->interrupt_window_open =
		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);

	if (vcpu->interrupt_window_open &&
	    vcpu->irq_summary &&
	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		kvm_do_inject_irq(vcpu);

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window))
		/*
		 * Interrupts blocked.  Wait for unblock.
		 */
		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
	else
		cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;

	set_debugreg(dbg->bp[0], 0);
	set_debugreg(dbg->bp[1], 1);
	set_debugreg(dbg->bp[2], 2);
	set_debugreg(dbg->bp[3], 3);

	if (dbg->singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
		vmcs_writel(GUEST_RFLAGS, flags);
	}
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
{
	if (!vcpu->rmode.active)
		return 0;

	/*
	 * An instruction with the address-size override prefix (opcode
	 * 0x67) causes a #SS fault with a zero error code in vm86 mode;
	 * emulate such instructions instead of reflecting the fault.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
		if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
			return 1;
	return 0;
}

static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 intr_info, error_code;
	unsigned long cr2, rip;
	u32 vect_info;
	enum emulation_result er;
	int r;

	vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
	    !is_page_fault(intr_info)) {
		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
		       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
	}

	if (is_external_interrupt(vect_info)) {
		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
		set_bit(irq, vcpu->irq_pending);
		set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
	}

	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
		asm ("int $2");
		return 1;
	}

	if (is_no_device(intr_info)) {
		vmx_fpu_activate(vcpu);
		return 1;
	}

	error_code = 0;
	rip = vmcs_readl(GUEST_RIP);
	if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	if (is_page_fault(intr_info)) {
		cr2 = vmcs_readl(EXIT_QUALIFICATION);

		spin_lock(&vcpu->kvm->lock);
		r = kvm_mmu_page_fault(vcpu, cr2, error_code);
		if (r < 0) {
			spin_unlock(&vcpu->kvm->lock);
			return r;
		}
		if (!r) {
			spin_unlock(&vcpu->kvm->lock);
			return 1;
		}

		er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
		spin_unlock(&vcpu->kvm->lock);

		switch (er) {
		case EMULATE_DONE:
			return 1;
		case EMULATE_DO_MMIO:
			++vcpu->stat.mmio_exits;
			return 0;
		case EMULATE_FAIL:
			vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
			break;
		default:
			BUG();
		}
	}

	if (vcpu->rmode.active &&
	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
								error_code)) {
		if (vcpu->halt_request) {
			vcpu->halt_request = 0;
			return kvm_emulate_halt(vcpu);
		}
		return 1;
	}

	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
	    (INTR_TYPE_EXCEPTION | 1)) { /* #DB */
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		return 0;
	}
	kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
	kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
	kvm_run->ex.error_code = error_code;
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
				     struct kvm_run *kvm_run)
{
	++vcpu->stat.irq_exits;
	return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

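/*
 * For a rep-prefixed string I/O instruction, scan the instruction's
 * prefix bytes to determine the effective address size, then read the
 * iteration count from the corresponding low bits of rcx.
 */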
static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
{
	u64 inst;
	gva_t rip;
	int countr_size;
	int i, n;

	if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
		countr_size = 2;
	} else {
		u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);

		countr_size = (cs_ar & AR_L_MASK) ? 8 :
			      (cs_ar & AR_DB_MASK) ? 4 : 2;
	}

	rip = vmcs_readl(GUEST_RIP);
	if (countr_size != 8)
		rip += vmcs_readl(GUEST_CS_BASE);

	n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);

	for (i = 0; i < n; i++) {
		switch (((u8 *)&inst)[i]) {
		case 0xf0:
		case 0xf2:
		case 0xf3:
		case 0x2e:
		case 0x36:
		case 0x3e:
		case 0x26:
		case 0x64:
		case 0x65:
		case 0x66:
			break;
		case 0x67:
			countr_size = (countr_size == 2) ? 4 : (countr_size >> 1);
			/* fall through */
		default:
			goto done;
		}
	}
	return 0;
done:
	countr_size *= 8;
	*count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
	/* printk("cx: %lx\n", vcpu->regs[VCPU_REGS_RCX]); */
	return 1;
}

static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification;
	int size, down, in, string, rep;
	unsigned port;
	unsigned long count;
	gva_t address;

	++vcpu->stat.io_exits;
	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	in = (exit_qualification & 8) != 0;
	size = (exit_qualification & 7) + 1;
	string = (exit_qualification & 16) != 0;
	down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
	count = 1;
	rep = (exit_qualification & 32) != 0;
	port = exit_qualification >> 16;
	address = 0;
	if (string) {
		if (rep && !get_io_count(vcpu, &count))
			return 1;
		address = vmcs_readl(GUEST_LINEAR_ADDRESS);
	}
	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
			     address, rep, port);
}

static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMCALL instruction (0f 01 c1), followed by a ret:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xc1;
	hypercall[3] = 0xc3; /* ret */
}

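/*
 * Control-register exits: decode the access from the exit
 * qualification (bits 3:0 = cr number, 5:4 = access type, 11:8 = gp
 * register) and forward it to the common cr emulation.
 */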
6aa8b732
AK
1866static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1867{
1868 u64 exit_qualification;
1869 int cr;
1870 int reg;
1871
1872 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1873 cr = exit_qualification & 15;
1874 reg = (exit_qualification >> 8) & 15;
1875 switch ((exit_qualification >> 4) & 3) {
1876 case 0: /* mov to cr */
1877 switch (cr) {
1878 case 0:
1879 vcpu_load_rsp_rip(vcpu);
1880 set_cr0(vcpu, vcpu->regs[reg]);
1881 skip_emulated_instruction(vcpu);
1882 return 1;
1883 case 3:
1884 vcpu_load_rsp_rip(vcpu);
1885 set_cr3(vcpu, vcpu->regs[reg]);
1886 skip_emulated_instruction(vcpu);
1887 return 1;
1888 case 4:
1889 vcpu_load_rsp_rip(vcpu);
1890 set_cr4(vcpu, vcpu->regs[reg]);
1891 skip_emulated_instruction(vcpu);
1892 return 1;
1893 case 8:
1894 vcpu_load_rsp_rip(vcpu);
1895 set_cr8(vcpu, vcpu->regs[reg]);
1896 skip_emulated_instruction(vcpu);
1897 return 1;
1898 };
1899 break;
25c4c276
AL
1900 case 2: /* clts */
1901 vcpu_load_rsp_rip(vcpu);
5fd86fcf 1902 vmx_fpu_deactivate(vcpu);
707d92fa 1903 vcpu->cr0 &= ~X86_CR0_TS;
2ab455cc 1904 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
5fd86fcf 1905 vmx_fpu_activate(vcpu);
25c4c276
AL
1906 skip_emulated_instruction(vcpu);
1907 return 1;
6aa8b732
AK
1908 case 1: /*mov from cr*/
1909 switch (cr) {
1910 case 3:
1911 vcpu_load_rsp_rip(vcpu);
1912 vcpu->regs[reg] = vcpu->cr3;
1913 vcpu_put_rsp_rip(vcpu);
1914 skip_emulated_instruction(vcpu);
1915 return 1;
1916 case 8:
6aa8b732
AK
1917 vcpu_load_rsp_rip(vcpu);
1918 vcpu->regs[reg] = vcpu->cr8;
1919 vcpu_put_rsp_rip(vcpu);
1920 skip_emulated_instruction(vcpu);
1921 return 1;
1922 }
1923 break;
1924 case 3: /* lmsw */
1925 lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
1926
1927 skip_emulated_instruction(vcpu);
1928 return 1;
1929 default:
1930 break;
1931 }
1932 kvm_run->exit_reason = 0;
1933 printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
1934 (int)(exit_qualification >> 4) & 3, cr);
1935 return 0;
1936}
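
/*
 * A note on the lmsw case above: the exit qualification carries the
 * lmsw source operand in bits 31:16 (LMSW_SOURCE_DATA_SHIFT), and the
 * & 0x0f keeps only the low four cr0 bits that lmsw operates on
 * (PE, MP, EM and TS).
 */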

static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification;
	unsigned long val;
	int dr, reg;

	/*
	 * FIXME: this code assumes the host is debugging the guest.
	 * need to deal with guest debugging itself too.
	 */
	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	dr = exit_qualification & 7;
	reg = (exit_qualification >> 8) & 15;
	vcpu_load_rsp_rip(vcpu);
	if (exit_qualification & 16) {
		/* mov from dr: feed the guest the architectural reset values */
		switch (dr) {
		case 6:
			val = 0xffff0ff0;	/* dr6 reset value */
			break;
		case 7:
			val = 0x400;		/* dr7 reset value */
			break;
		default:
			val = 0;
		}
		vcpu->regs[reg] = val;
	} else {
		/* mov to dr: silently discarded */
	}
	vcpu_put_rsp_rip(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_emulate_cpuid(vcpu);
	return 1;
}

static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (vmx_get_msr(vcpu, ecx, &data)) {
		vmx_inject_gp(vcpu, 0);
		return 1;
	}

	/* FIXME: handling of bits 32:63 of rax, rdx */
	vcpu->regs[VCPU_REGS_RAX] = data & -1u;
	vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);

	if (vmx_set_msr(vcpu, ecx, data) != 0) {
		vmx_inject_gp(vcpu, 0);
		return 1;
	}

	skip_emulated_instruction(vcpu);
	return 1;
}
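
/*
 * rdmsr and wrmsr move a 64-bit value through the edx:eax pair: for
 * example, wrmsr with edx = 0x00000001 and eax = 0x00000000 writes
 * 0x0000000100000000.  The & -1u masks above pick out the low 32 bits
 * of rax and rdx before they are recombined.
 */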

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = vcpu->cr8;
	kvm_run->apic_base = vcpu->apic_base;
	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
						  vcpu->irq_summary == 0);
}

static int handle_interrupt_window(struct kvm_vcpu *vcpu,
				   struct kvm_run *kvm_run)
{
	/*
	 * If userspace is waiting to inject interrupts, exit as soon as
	 * possible.
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->irq_summary) {
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		++vcpu->stat.irq_window_exits;
		return 0;
	}
	return 1;
}

static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	skip_emulated_instruction(vcpu);
	return kvm_emulate_halt(vcpu);
}

static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	skip_emulated_instruction(vcpu);
	return kvm_hypercall(vcpu, kvm_run);
}

/*
 * The exit handlers return 1 if the exit was handled fully and guest execution
 * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
 * to be done to userspace and return 0.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
				      struct kvm_run *kvm_run) = {
	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
	[EXIT_REASON_CR_ACCESS]               = handle_cr,
	[EXIT_REASON_DR_ACCESS]               = handle_dr,
	[EXIT_REASON_CPUID]                   = handle_cpuid,
	[EXIT_REASON_MSR_READ]                = handle_rdmsr,
	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
	[EXIT_REASON_HLT]                     = handle_halt,
	[EXIT_REASON_VMCALL]                  = handle_vmcall,
};
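
/*
 * The array is indexed directly by the hardware exit reason number,
 * so exit reasons without a handler simply leave a NULL hole;
 * kvm_handle_exit() below checks for that and punts such exits to
 * userspace as KVM_EXIT_UNKNOWN.
 */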

static const int kvm_vmx_max_exit_handlers =
	ARRAY_SIZE(kvm_vmx_exit_handlers);

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);

	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    exit_reason != EXIT_REASON_EXCEPTION_NMI)
		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
		       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
	if (exit_reason < kvm_vmx_max_exit_handlers
	    && kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
}

static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	/*
	 * No explicit flush is needed here: without VPID (not used by
	 * this code), every VM entry/exit pair already flushes the
	 * guest's TLB entries.
	 */
}

static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u8 fail;
	int r;

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_guest_debug_pre(vcpu);

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	if (!vcpu->mmio_read_completed)
		do_interrupt_requests(vcpu, kvm_run);

	vmx_save_host_state(vcpu);
	kvm_load_guest_fpu(vcpu);

	/*
	 * Loading guest fpu may have cleared host cr0.ts
	 */
	vmcs_writel(HOST_CR0, read_cr0());

	local_irq_disable();

	vcpu->guest_mode = 1;
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
			vmx_flush_tlb(vcpu);

	asm (
		/* Store host registers */
#ifdef CONFIG_X86_64
		"push %%rax; push %%rbx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8; push %%r9; push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
		"push %%rcx \n\t"
		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#else
		"pusha; push %%ecx \n\t"
		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#endif
		/* Check if vmlaunch or vmresume is needed */
		"cmp $0, %1 \n\t"
		/* Load guest registers.  Don't clobber flags. */
#ifdef CONFIG_X86_64
		"mov %c[cr2](%3), %%rax \n\t"
		"mov %%rax, %%cr2 \n\t"
		"mov %c[rax](%3), %%rax \n\t"
		"mov %c[rbx](%3), %%rbx \n\t"
		"mov %c[rdx](%3), %%rdx \n\t"
		"mov %c[rsi](%3), %%rsi \n\t"
		"mov %c[rdi](%3), %%rdi \n\t"
		"mov %c[rbp](%3), %%rbp \n\t"
		"mov %c[r8](%3), %%r8 \n\t"
		"mov %c[r9](%3), %%r9 \n\t"
		"mov %c[r10](%3), %%r10 \n\t"
		"mov %c[r11](%3), %%r11 \n\t"
		"mov %c[r12](%3), %%r12 \n\t"
		"mov %c[r13](%3), %%r13 \n\t"
		"mov %c[r14](%3), %%r14 \n\t"
		"mov %c[r15](%3), %%r15 \n\t"
		"mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
#else
		"mov %c[cr2](%3), %%eax \n\t"
		"mov %%eax, %%cr2 \n\t"
		"mov %c[rax](%3), %%eax \n\t"
		"mov %c[rbx](%3), %%ebx \n\t"
		"mov %c[rdx](%3), %%edx \n\t"
		"mov %c[rsi](%3), %%esi \n\t"
		"mov %c[rdi](%3), %%edi \n\t"
		"mov %c[rbp](%3), %%ebp \n\t"
		"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
#endif
		/* Enter guest mode */
		"jne .Llaunched \n\t"
		ASM_VMX_VMLAUNCH "\n\t"
		"jmp .Lkvm_vmx_return \n\t"
		".Llaunched: " ASM_VMX_VMRESUME "\n\t"
		".Lkvm_vmx_return: "
		/* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
		"xchg %3, (%%rsp) \n\t"
		"mov %%rax, %c[rax](%3) \n\t"
		"mov %%rbx, %c[rbx](%3) \n\t"
		"pushq (%%rsp); popq %c[rcx](%3) \n\t"
		"mov %%rdx, %c[rdx](%3) \n\t"
		"mov %%rsi, %c[rsi](%3) \n\t"
		"mov %%rdi, %c[rdi](%3) \n\t"
		"mov %%rbp, %c[rbp](%3) \n\t"
		"mov %%r8, %c[r8](%3) \n\t"
		"mov %%r9, %c[r9](%3) \n\t"
		"mov %%r10, %c[r10](%3) \n\t"
		"mov %%r11, %c[r11](%3) \n\t"
		"mov %%r12, %c[r12](%3) \n\t"
		"mov %%r13, %c[r13](%3) \n\t"
		"mov %%r14, %c[r14](%3) \n\t"
		"mov %%r15, %c[r15](%3) \n\t"
		"mov %%cr2, %%rax \n\t"
		"mov %%rax, %c[cr2](%3) \n\t"
		"mov (%%rsp), %3 \n\t"

		"pop %%rcx; pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rbx; pop %%rax \n\t"
#else
		"xchg %3, (%%esp) \n\t"
		"mov %%eax, %c[rax](%3) \n\t"
		"mov %%ebx, %c[rbx](%3) \n\t"
		"pushl (%%esp); popl %c[rcx](%3) \n\t"
		"mov %%edx, %c[rdx](%3) \n\t"
		"mov %%esi, %c[rsi](%3) \n\t"
		"mov %%edi, %c[rdi](%3) \n\t"
		"mov %%ebp, %c[rbp](%3) \n\t"
		"mov %%cr2, %%eax \n\t"
		"mov %%eax, %c[cr2](%3) \n\t"
		"mov (%%esp), %3 \n\t"

		"pop %%ecx; popa \n\t"
#endif
		"setbe %0 \n\t"
	      : "=q" (fail)
	      : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
		"c"(vcpu),
		[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
		[rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		[rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		[rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
		[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
		[r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		[r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		[r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		[r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		[r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		[r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
#endif
		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
	      : "cc", "memory" );

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;

	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));

	if (unlikely(fail)) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= vmcs_read32(VM_INSTRUCTION_ERROR);
		r = 0;
		goto out;
	}
	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));

	vmx->launched = 1;
	r = kvm_handle_exit(kvm_run, vcpu);
	if (r > 0) {
		/* Give the scheduler a chance to reschedule. */
		if (signal_pending(current)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
			goto out;
		}

		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		if (!need_resched()) {
			++vcpu->stat.light_exits;
			goto again;
		}
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);
	return r;
}
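
/*
 * In outline, one pass through vmx_vcpu_run() is: reload the mmu if
 * needed, queue any pending interrupt, save host state and load the
 * guest fpu, disable interrupts, then vmlaunch on the first entry or
 * vmresume once vmx->launched is set.  Back on the host side the
 * guest registers are saved, a failed entry is reported to userspace,
 * and light exits loop straight back to "again" without going through
 * the scheduler.
 */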

static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  u32 err_code)
{
	u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

	++vcpu->stat.pf_guest;

	if (is_page_fault(vect_info)) {
		/* Faulted while delivering a page fault: escalate to #DF */
		printk(KERN_DEBUG "inject_page_fault: "
		       "double fault 0x%lx @ 0x%lx\n",
		       addr, vmcs_readl(GUEST_RIP));
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     DF_VECTOR |
			     INTR_TYPE_EXCEPTION |
			     INTR_INFO_DELIEVER_CODE_MASK |
			     INTR_INFO_VALID_MASK);
		return;
	}
	vcpu->cr2 = addr;
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     PF_VECTOR |
		     INTR_TYPE_EXCEPTION |
		     INTR_INFO_DELIEVER_CODE_MASK |
		     INTR_INFO_VALID_MASK);
}

static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->vmcs) {
		on_each_cpu(__vcpu_clear, vcpu, 0, 1);
		free_vmcs(vmx->vmcs);
		vmx->vmcs = NULL;
	}
}

static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx_free_vmcs(vcpu);
	kfree(vmx->host_msrs);
	kfree(vmx->guest_msrs);
	kvm_vcpu_uninit(vcpu);
	kfree(vmx);
}

static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
	int err;
	struct vcpu_vmx *vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);

	if (!vmx)
		return ERR_PTR(-ENOMEM);

	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vmx->guest_msrs) {
		err = -ENOMEM;
		goto uninit_vcpu;
	}

	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vmx->host_msrs) {
		err = -ENOMEM;
		goto free_guest_msrs;
	}

	vmx->vmcs = alloc_vmcs();
	if (!vmx->vmcs) {
		err = -ENOMEM;
		goto free_msrs;
	}

	vmcs_clear(vmx->vmcs);

	vmx_vcpu_load(&vmx->vcpu);
	err = vmx_vcpu_setup(&vmx->vcpu);
	vmx_vcpu_put(&vmx->vcpu);
	if (err)
		goto free_vmcs;

	return &vmx->vcpu;

free_vmcs:
	free_vmcs(vmx->vmcs);
free_msrs:
	kfree(vmx->host_msrs);
free_guest_msrs:
	kfree(vmx->guest_msrs);
uninit_vcpu:
	kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
	kfree(vmx);
	return ERR_PTR(err);
}

static struct kvm_arch_ops vmx_arch_ops = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,

	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,
	.vcpu_decache = vmx_vcpu_decache,

	.set_guest_debug = set_guest_debug,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
#ifdef CONFIG_X86_64
	.set_efer = vmx_set_efer,
#endif
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.cache_regs = vcpu_load_rsp_rip,
	.decache_regs = vcpu_put_rsp_rip,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush = vmx_flush_tlb,
	.inject_page_fault = vmx_inject_page_fault,

	.inject_gp = vmx_inject_gp,

	.run = vmx_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = vmx_patch_hypercall,
};

static int __init vmx_init(void)
{
	void *iova;
	int r;

	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_a)
		return -ENOMEM;

	vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_b) {
		r = -ENOMEM;
		goto out;
	}

	/*
	 * Set every bit so all I/O port accesses exit, then allow direct
	 * access to the PC debug port (it is often used for I/O delays,
	 * but the vmexits simply slow things down).
	 */
	iova = kmap(vmx_io_bitmap_a);
	memset(iova, 0xff, PAGE_SIZE);
	clear_bit(0x80, iova);
	kunmap(vmx_io_bitmap_a);

	iova = kmap(vmx_io_bitmap_b);
	memset(iova, 0xff, PAGE_SIZE);
	kunmap(vmx_io_bitmap_b);

	r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
	if (r)
		goto out1;

	return 0;

out1:
	__free_page(vmx_io_bitmap_b);
out:
	__free_page(vmx_io_bitmap_a);
	return r;
}
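
/*
 * Each bitmap page covers 32768 ports: bitmap A ports 0x0000-0x7fff,
 * bitmap B ports 0x8000-0xffff.  A set bit makes access to that port
 * trap to the hypervisor, so passing another low port through
 * directly would just mean one more clear_bit(port, iova) on bitmap A
 * above.
 */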

static void __exit vmx_exit(void)
{
	__free_page(vmx_io_bitmap_b);
	__free_page(vmx_io_bitmap_a);

	kvm_exit_arch();
}

module_init(vmx_init)
module_exit(vmx_exit)