1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * derived from drivers/kvm/kvm_main.c
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
7 *
8 * Authors:
9 * Avi Kivity <avi@qumranet.com>
10 * Yaniv Kamay <yaniv@qumranet.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2. See
13 * the COPYING file in the top-level directory.
14 *
15 */
16
17#include "kvm.h"
18#include "x86.h"
19#include "x86_emulate.h"
20#include "segment_descriptor.h"
21#include "irq.h"
22
23#include <linux/kvm.h>
24#include <linux/fs.h>
25#include <linux/vmalloc.h>
26#include <linux/module.h>
27#include <linux/mman.h>
28
29#include <asm/uaccess.h>
30#include <asm/msr.h>
31
32#define MAX_IO_MSRS 256
33#define CR0_RESERVED_BITS \
34 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
35 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
36 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
37#define CR4_RESERVED_BITS \
38 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
39 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
40 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
41 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
42
43#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
44#define EFER_RESERVED_BITS 0xfffffffffffff2fe
45
46#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
47#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
48
49struct kvm_x86_ops *kvm_x86_ops;
50
51struct kvm_stats_debugfs_item debugfs_entries[] = {
52 { "pf_fixed", VCPU_STAT(pf_fixed) },
53 { "pf_guest", VCPU_STAT(pf_guest) },
54 { "tlb_flush", VCPU_STAT(tlb_flush) },
55 { "invlpg", VCPU_STAT(invlpg) },
56 { "exits", VCPU_STAT(exits) },
57 { "io_exits", VCPU_STAT(io_exits) },
58 { "mmio_exits", VCPU_STAT(mmio_exits) },
59 { "signal_exits", VCPU_STAT(signal_exits) },
60 { "irq_window", VCPU_STAT(irq_window_exits) },
61 { "halt_exits", VCPU_STAT(halt_exits) },
62 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
63 { "request_irq", VCPU_STAT(request_irq_exits) },
64 { "irq_exits", VCPU_STAT(irq_exits) },
65 { "host_state_reload", VCPU_STAT(host_state_reload) },
66 { "efer_reload", VCPU_STAT(efer_reload) },
67 { "fpu_reload", VCPU_STAT(fpu_reload) },
68 { "insn_emulation", VCPU_STAT(insn_emulation) },
69 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
70 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
71 { "mmu_pte_write", VM_STAT(mmu_pte_write) },
72 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
73 { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
74 { "mmu_flooded", VM_STAT(mmu_flooded) },
75 { "mmu_recycled", VM_STAT(mmu_recycled) },
76 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
77 { NULL }
78};
79
80
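/*
 * Resolve a selector to the linear base of its segment by walking the
 * host GDT (or the LDT when the table-indicator bit is set) and
 * reassembling the base fields scattered across the descriptor.
 */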
81unsigned long segment_base(u16 selector)
82{
83 struct descriptor_table gdt;
84 struct segment_descriptor *d;
85 unsigned long table_base;
86 unsigned long v;
87
88 if (selector == 0)
89 return 0;
90
91 asm("sgdt %0" : "=m"(gdt));
92 table_base = gdt.base;
93
94 if (selector & 4) { /* from ldt */
95 u16 ldt_selector;
96
97 asm("sldt %0" : "=g"(ldt_selector));
98 table_base = segment_base(ldt_selector);
99 }
100 d = (struct segment_descriptor *)(table_base + (selector & ~7));
101 v = d->base_low | ((unsigned long)d->base_mid << 16) |
102 ((unsigned long)d->base_high << 24);
103#ifdef CONFIG_X86_64
104 if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
105 v |= ((unsigned long) \
106 ((struct segment_descriptor_64 *)d)->base_higher) << 32;
107#endif
108 return v;
109}
110EXPORT_SYMBOL_GPL(segment_base);
111
112u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
113{
114 if (irqchip_in_kernel(vcpu->kvm))
115 return vcpu->apic_base;
116 else
117 return vcpu->apic_base;
118}
119EXPORT_SYMBOL_GPL(kvm_get_apic_base);
120
121void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
122{
123 /* TODO: reserve bits check */
124 if (irqchip_in_kernel(vcpu->kvm))
125 kvm_lapic_set_base(vcpu, data);
126 else
127 vcpu->apic_base = data;
128}
129EXPORT_SYMBOL_GPL(kvm_set_apic_base);
130
131static void inject_gp(struct kvm_vcpu *vcpu)
132{
133 kvm_x86_ops->inject_gp(vcpu, 0);
134}
135
136void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
137{
138 WARN_ON(vcpu->exception.pending);
139 vcpu->exception.pending = true;
140 vcpu->exception.has_error_code = false;
141 vcpu->exception.nr = nr;
142}
143EXPORT_SYMBOL_GPL(kvm_queue_exception);
144
145void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
146 u32 error_code)
147{
148 ++vcpu->stat.pf_guest;
149 if (vcpu->exception.pending && vcpu->exception.nr == PF_VECTOR) {
150 printk(KERN_DEBUG "kvm: inject_page_fault:"
151 " double fault 0x%lx\n", addr);
152 vcpu->exception.nr = DF_VECTOR;
153 vcpu->exception.error_code = 0;
154 return;
155 }
156 vcpu->cr2 = addr;
157 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
158}
159
160void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
161{
162 WARN_ON(vcpu->exception.pending);
163 vcpu->exception.pending = true;
164 vcpu->exception.has_error_code = true;
165 vcpu->exception.nr = nr;
166 vcpu->exception.error_code = error_code;
167}
168EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
169
170static void __queue_exception(struct kvm_vcpu *vcpu)
171{
172 kvm_x86_ops->queue_exception(vcpu, vcpu->exception.nr,
173 vcpu->exception.has_error_code,
174 vcpu->exception.error_code);
175}
176
177/*
178 * Load the pae pdptrs. Return true if they are all valid.
179 */
180int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
181{
182 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
183 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
184 int i;
185 int ret;
186 u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
187
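 /*
 * In PAE mode cr3 points at a 32-byte-aligned table of four PDPTEs
 * within a page; 'offset' above is the index of the first entry
 * inside that page, in u64 units.
 */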
188 mutex_lock(&vcpu->kvm->lock);
189 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
190 offset * sizeof(u64), sizeof(pdpte));
191 if (ret < 0) {
192 ret = 0;
193 goto out;
194 }
195 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
196 if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
197 ret = 0;
198 goto out;
199 }
200 }
201 ret = 1;
202
203 memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
204out:
205 mutex_unlock(&vcpu->kvm->lock);
206
207 return ret;
208}
209
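/*
 * Re-read the four guest PDPTEs (cr3 with its low five bits masked off
 * locates the table) and compare them against the cached copy, so that
 * reloading cr3 with an unchanged value can get away with a TLB flush.
 */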
210static bool pdptrs_changed(struct kvm_vcpu *vcpu)
211{
212 u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
213 bool changed = true;
214 int r;
215
216 if (is_long_mode(vcpu) || !is_pae(vcpu))
217 return false;
218
219 mutex_lock(&vcpu->kvm->lock);
220 r = kvm_read_guest(vcpu->kvm, vcpu->cr3 & ~31u, pdpte, sizeof(pdpte));
221 if (r < 0)
222 goto out;
223 changed = memcmp(pdpte, vcpu->pdptrs, sizeof(pdpte)) != 0;
224out:
225 mutex_unlock(&vcpu->kvm->lock);
226
227 return changed;
228}
229
230void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
231{
232 if (cr0 & CR0_RESERVED_BITS) {
233 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
234 cr0, vcpu->cr0);
235 inject_gp(vcpu);
236 return;
237 }
238
239 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
240 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
241 inject_gp(vcpu);
242 return;
243 }
244
245 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
246 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
247 "and a clear PE flag\n");
248 inject_gp(vcpu);
249 return;
250 }
251
252 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
253#ifdef CONFIG_X86_64
254 if ((vcpu->shadow_efer & EFER_LME)) {
255 int cs_db, cs_l;
256
257 if (!is_pae(vcpu)) {
258 printk(KERN_DEBUG "set_cr0: #GP, start paging "
259 "in long mode while PAE is disabled\n");
260 inject_gp(vcpu);
261 return;
262 }
263 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
264 if (cs_l) {
265 printk(KERN_DEBUG "set_cr0: #GP, start paging "
266 "in long mode while CS.L == 1\n");
267 inject_gp(vcpu);
268 return;
269
270 }
271 } else
272#endif
273 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
274 printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
275 "reserved bits\n");
276 inject_gp(vcpu);
277 return;
278 }
279
280 }
281
282 kvm_x86_ops->set_cr0(vcpu, cr0);
283 vcpu->cr0 = cr0;
284
285 mutex_lock(&vcpu->kvm->lock);
286 kvm_mmu_reset_context(vcpu);
287 mutex_unlock(&vcpu->kvm->lock);
288 return;
289}
290EXPORT_SYMBOL_GPL(set_cr0);
291
292void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
293{
294 set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
295}
296EXPORT_SYMBOL_GPL(lmsw);
297
298void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
299{
300 if (cr4 & CR4_RESERVED_BITS) {
301 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
302 inject_gp(vcpu);
303 return;
304 }
305
306 if (is_long_mode(vcpu)) {
307 if (!(cr4 & X86_CR4_PAE)) {
308 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
309 "in long mode\n");
310 inject_gp(vcpu);
311 return;
312 }
313 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
314 && !load_pdptrs(vcpu, vcpu->cr3)) {
315 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
316 inject_gp(vcpu);
317 return;
318 }
319
320 if (cr4 & X86_CR4_VMXE) {
321 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
322 inject_gp(vcpu);
323 return;
324 }
325 kvm_x86_ops->set_cr4(vcpu, cr4);
326 vcpu->cr4 = cr4;
327 mutex_lock(&vcpu->kvm->lock);
328 kvm_mmu_reset_context(vcpu);
329 mutex_unlock(&vcpu->kvm->lock);
330}
331EXPORT_SYMBOL_GPL(set_cr4);
332
333void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
334{
335 if (cr3 == vcpu->cr3 && !pdptrs_changed(vcpu)) {
336 kvm_mmu_flush_tlb(vcpu);
337 return;
338 }
339
340 if (is_long_mode(vcpu)) {
341 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
342 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
343 inject_gp(vcpu);
344 return;
345 }
346 } else {
347 if (is_pae(vcpu)) {
348 if (cr3 & CR3_PAE_RESERVED_BITS) {
349 printk(KERN_DEBUG
350 "set_cr3: #GP, reserved bits\n");
351 inject_gp(vcpu);
352 return;
353 }
354 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
355 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
356 "reserved bits\n");
357 inject_gp(vcpu);
358 return;
359 }
360 }
361 /*
362 * We don't check reserved bits in nonpae mode, because
363 * this isn't enforced, and VMware depends on this.
364 */
365 }
366
367 mutex_lock(&vcpu->kvm->lock);
368 /*
369 * Does the new cr3 value map to physical memory? (Note, we
370 * catch an invalid cr3 even in real-mode, because it would
371 * cause trouble later on when we turn on paging anyway.)
372 *
373 * A real CPU would silently accept an invalid cr3 and would
374 * attempt to use it - with largely undefined (and often hard
375 * to debug) behavior on the guest side.
376 */
377 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
378 inject_gp(vcpu);
379 else {
380 vcpu->cr3 = cr3;
381 vcpu->mmu.new_cr3(vcpu);
382 }
383 mutex_unlock(&vcpu->kvm->lock);
384}
385EXPORT_SYMBOL_GPL(set_cr3);
386
387void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
388{
389 if (cr8 & CR8_RESERVED_BITS) {
390 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
391 inject_gp(vcpu);
392 return;
393 }
394 if (irqchip_in_kernel(vcpu->kvm))
395 kvm_lapic_set_tpr(vcpu, cr8);
396 else
397 vcpu->cr8 = cr8;
398}
399EXPORT_SYMBOL_GPL(set_cr8);
400
401unsigned long get_cr8(struct kvm_vcpu *vcpu)
402{
403 if (irqchip_in_kernel(vcpu->kvm))
404 return kvm_lapic_get_cr8(vcpu);
405 else
406 return vcpu->cr8;
407}
408EXPORT_SYMBOL_GPL(get_cr8);
409
410/*
411 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
412 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
413 *
414 * This list is modified at module load time to reflect the
415 * capabilities of the host cpu.
416 */
417static u32 msrs_to_save[] = {
418 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
419 MSR_K6_STAR,
420#ifdef CONFIG_X86_64
421 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
422#endif
423 MSR_IA32_TIME_STAMP_COUNTER,
424};
425
426static unsigned num_msrs_to_save;
427
428static u32 emulated_msrs[] = {
429 MSR_IA32_MISC_ENABLE,
430};
431
432#ifdef CONFIG_X86_64
433
434static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
435{
436 if (efer & EFER_RESERVED_BITS) {
437 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
438 efer);
439 inject_gp(vcpu);
440 return;
441 }
442
443 if (is_paging(vcpu)
444 && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
445 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
446 inject_gp(vcpu);
447 return;
448 }
449
450 kvm_x86_ops->set_efer(vcpu, efer);
451
452 efer &= ~EFER_LMA;
453 efer |= vcpu->shadow_efer & EFER_LMA;
454
455 vcpu->shadow_efer = efer;
456}
457
458#endif
459
460/*
461 * Writes msr value into the appropriate "register".
462 * Returns 0 on success, non-0 otherwise.
463 * Assumes vcpu_load() was already called.
464 */
465int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
466{
467 return kvm_x86_ops->set_msr(vcpu, msr_index, data);
468}
469
470/*
471 * Adapt set_msr() to msr_io()'s calling convention
472 */
473static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
474{
475 return kvm_set_msr(vcpu, index, *data);
476}
477
478
479int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
480{
481 switch (msr) {
482#ifdef CONFIG_X86_64
483 case MSR_EFER:
484 set_efer(vcpu, data);
485 break;
486#endif
487 case MSR_IA32_MC0_STATUS:
488 pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
489 __FUNCTION__, data);
490 break;
491 case MSR_IA32_MCG_STATUS:
492 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
493 __FUNCTION__, data);
494 break;
495 case MSR_IA32_UCODE_REV:
496 case MSR_IA32_UCODE_WRITE:
497 case 0x200 ... 0x2ff: /* MTRRs */
498 break;
499 case MSR_IA32_APICBASE:
500 kvm_set_apic_base(vcpu, data);
501 break;
502 case MSR_IA32_MISC_ENABLE:
503 vcpu->ia32_misc_enable_msr = data;
504 break;
505 default:
506 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
507 return 1;
508 }
509 return 0;
510}
511EXPORT_SYMBOL_GPL(kvm_set_msr_common);
512
513
514/*
515 * Reads an msr value (of 'msr_index') into 'pdata'.
516 * Returns 0 on success, non-0 otherwise.
517 * Assumes vcpu_load() was already called.
518 */
519int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
520{
521 return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
522}
523
524int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
525{
526 u64 data;
527
528 switch (msr) {
529 case 0xc0010010: /* SYSCFG */
530 case 0xc0010015: /* HWCR */
531 case MSR_IA32_PLATFORM_ID:
532 case MSR_IA32_P5_MC_ADDR:
533 case MSR_IA32_P5_MC_TYPE:
534 case MSR_IA32_MC0_CTL:
535 case MSR_IA32_MCG_STATUS:
536 case MSR_IA32_MCG_CAP:
537 case MSR_IA32_MC0_MISC:
538 case MSR_IA32_MC0_MISC+4:
539 case MSR_IA32_MC0_MISC+8:
540 case MSR_IA32_MC0_MISC+12:
541 case MSR_IA32_MC0_MISC+16:
542 case MSR_IA32_UCODE_REV:
543 case MSR_IA32_PERF_STATUS:
544 case MSR_IA32_EBL_CR_POWERON:
545 /* MTRR registers */
546 case 0xfe:
547 case 0x200 ... 0x2ff:
548 data = 0;
549 break;
550 case 0xcd: /* fsb frequency */
551 data = 3;
552 break;
553 case MSR_IA32_APICBASE:
554 data = kvm_get_apic_base(vcpu);
555 break;
556 case MSR_IA32_MISC_ENABLE:
557 data = vcpu->ia32_misc_enable_msr;
558 break;
559#ifdef CONFIG_X86_64
560 case MSR_EFER:
561 data = vcpu->shadow_efer;
562 break;
563#endif
564 default:
565 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
566 return 1;
567 }
568 *pdata = data;
569 return 0;
570}
571EXPORT_SYMBOL_GPL(kvm_get_msr_common);
572
573/*
574 * Read or write a bunch of msrs. All parameters are kernel addresses.
575 *
576 * @return number of msrs set successfully.
577 */
578static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
579 struct kvm_msr_entry *entries,
580 int (*do_msr)(struct kvm_vcpu *vcpu,
581 unsigned index, u64 *data))
582{
583 int i;
584
585 vcpu_load(vcpu);
586
587 for (i = 0; i < msrs->nmsrs; ++i)
588 if (do_msr(vcpu, entries[i].index, &entries[i].data))
589 break;
590
591 vcpu_put(vcpu);
592
593 return i;
594}
595
596/*
597 * Read or write a bunch of msrs. Parameters are user addresses.
598 *
599 * @return number of msrs set successfully.
600 */
601static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
602 int (*do_msr)(struct kvm_vcpu *vcpu,
603 unsigned index, u64 *data),
604 int writeback)
605{
606 struct kvm_msrs msrs;
607 struct kvm_msr_entry *entries;
608 int r, n;
609 unsigned size;
610
611 r = -EFAULT;
612 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
613 goto out;
614
615 r = -E2BIG;
616 if (msrs.nmsrs >= MAX_IO_MSRS)
617 goto out;
618
619 r = -ENOMEM;
620 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
621 entries = vmalloc(size);
622 if (!entries)
623 goto out;
624
625 r = -EFAULT;
626 if (copy_from_user(entries, user_msrs->entries, size))
627 goto out_free;
628
629 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
630 if (r < 0)
631 goto out_free;
632
633 r = -EFAULT;
634 if (writeback && copy_to_user(user_msrs->entries, entries, size))
635 goto out_free;
636
637 r = n;
638
639out_free:
640 vfree(entries);
641out:
642 return r;
643}
644
645/*
646 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
647 * cached on it.
648 */
649void decache_vcpus_on_cpu(int cpu)
650{
651 struct kvm *vm;
652 struct kvm_vcpu *vcpu;
653 int i;
654
655 spin_lock(&kvm_lock);
656 list_for_each_entry(vm, &vm_list, vm_list)
657 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
658 vcpu = vm->vcpus[i];
659 if (!vcpu)
660 continue;
661 /*
662 * If the vcpu is locked, then it is running on some
663 * other cpu and therefore it is not cached on the
664 * cpu in question.
665 *
666 * If it's not locked, check the last cpu it executed
667 * on.
668 */
669 if (mutex_trylock(&vcpu->mutex)) {
670 if (vcpu->cpu == cpu) {
671 kvm_x86_ops->vcpu_decache(vcpu);
672 vcpu->cpu = -1;
673 }
674 mutex_unlock(&vcpu->mutex);
675 }
676 }
677 spin_unlock(&kvm_lock);
678}
679
680int kvm_dev_ioctl_check_extension(long ext)
681{
682 int r;
683
684 switch (ext) {
685 case KVM_CAP_IRQCHIP:
686 case KVM_CAP_HLT:
687 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
688 case KVM_CAP_USER_MEMORY:
689 case KVM_CAP_SET_TSS_ADDR:
690 case KVM_CAP_EXT_CPUID:
691 r = 1;
692 break;
693 default:
694 r = 0;
695 break;
696 }
697 return r;
698
699}
700
701long kvm_arch_dev_ioctl(struct file *filp,
702 unsigned int ioctl, unsigned long arg)
703{
704 void __user *argp = (void __user *)arg;
705 long r;
706
707 switch (ioctl) {
708 case KVM_GET_MSR_INDEX_LIST: {
709 struct kvm_msr_list __user *user_msr_list = argp;
710 struct kvm_msr_list msr_list;
711 unsigned n;
712
713 r = -EFAULT;
714 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
715 goto out;
716 n = msr_list.nmsrs;
717 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
718 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
719 goto out;
720 r = -E2BIG;
721 if (n < num_msrs_to_save)
722 goto out;
723 r = -EFAULT;
724 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
725 num_msrs_to_save * sizeof(u32)))
726 goto out;
727 if (copy_to_user(user_msr_list->indices
728 + num_msrs_to_save * sizeof(u32),
729 &emulated_msrs,
730 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
731 goto out;
732 r = 0;
733 break;
734 }
735 default:
736 r = -EINVAL;
737 }
738out:
739 return r;
740}
741
742void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
743{
744 kvm_x86_ops->vcpu_load(vcpu, cpu);
745}
746
747void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
748{
749 kvm_x86_ops->vcpu_put(vcpu);
750 kvm_put_guest_fpu(vcpu);
751}
752
753static int is_efer_nx(void)
754{
755 u64 efer;
756
757 rdmsrl(MSR_EFER, efer);
758 return efer & EFER_NX;
759}
760
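/*
 * If the host runs with EFER.NX clear, strip the NX bit (bit 20 of
 * leaf 0x80000001 edx) from the guest's cpuid so the guest is not
 * advertised a feature the host is not using.
 */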
761static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
762{
763 int i;
764 struct kvm_cpuid_entry2 *e, *entry;
765
766 entry = NULL;
767 for (i = 0; i < vcpu->cpuid_nent; ++i) {
768 e = &vcpu->cpuid_entries[i];
769 if (e->function == 0x80000001) {
770 entry = e;
771 break;
772 }
773 }
774 if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
775 entry->edx &= ~(1 << 20);
776 printk(KERN_INFO "kvm: guest NX capability removed\n");
777 }
778}
779
780/* when an old userspace process fills a new kernel module */
781static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
782 struct kvm_cpuid *cpuid,
783 struct kvm_cpuid_entry __user *entries)
784{
785 int r, i;
786 struct kvm_cpuid_entry *cpuid_entries;
787
788 r = -E2BIG;
789 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
790 goto out;
791 r = -ENOMEM;
792 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
793 if (!cpuid_entries)
794 goto out;
795 r = -EFAULT;
796 if (copy_from_user(cpuid_entries, entries,
797 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
798 goto out_free;
799 for (i = 0; i < cpuid->nent; i++) {
800 vcpu->cpuid_entries[i].function = cpuid_entries[i].function;
801 vcpu->cpuid_entries[i].eax = cpuid_entries[i].eax;
802 vcpu->cpuid_entries[i].ebx = cpuid_entries[i].ebx;
803 vcpu->cpuid_entries[i].ecx = cpuid_entries[i].ecx;
804 vcpu->cpuid_entries[i].edx = cpuid_entries[i].edx;
805 vcpu->cpuid_entries[i].index = 0;
806 vcpu->cpuid_entries[i].flags = 0;
807 vcpu->cpuid_entries[i].padding[0] = 0;
808 vcpu->cpuid_entries[i].padding[1] = 0;
809 vcpu->cpuid_entries[i].padding[2] = 0;
810 }
811 vcpu->cpuid_nent = cpuid->nent;
812 cpuid_fix_nx_cap(vcpu);
813 r = 0;
814
815out_free:
816 vfree(cpuid_entries);
817out:
818 return r;
819}
820
821static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
822 struct kvm_cpuid2 *cpuid,
823 struct kvm_cpuid_entry2 __user *entries)
824{
825 int r;
826
827 r = -E2BIG;
828 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
829 goto out;
830 r = -EFAULT;
831 if (copy_from_user(&vcpu->cpuid_entries, entries,
832 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
833 goto out;
834 vcpu->cpuid_nent = cpuid->nent;
835 return 0;
836
837out:
838 return r;
839}
840
841static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
842 struct kvm_cpuid2 *cpuid,
843 struct kvm_cpuid_entry2 __user *entries)
844{
845 int r;
846
847 r = -E2BIG;
848 if (cpuid->nent < vcpu->cpuid_nent)
849 goto out;
850 r = -EFAULT;
851 if (copy_to_user(entries, &vcpu->cpuid_entries,
852 vcpu->cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
853 goto out;
854 return 0;
855
856out:
857 cpuid->nent = vcpu->cpuid_nent;
858 return r;
859}
860
861static inline u32 bit(int bitno)
862{
863 return 1 << (bitno & 31);
864}
865
866static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
867 u32 index)
868{
869 entry->function = function;
870 entry->index = index;
871 cpuid_count(entry->function, entry->index,
872 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
873 entry->flags = 0;
874}
875
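/*
 * The masks below list the feature bits KVM is willing to expose:
 * word0/word3 filter edx/ecx of leaf 1, word1/word6 filter edx/ecx
 * of leaf 0x80000001.
 */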
876static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
877 u32 index, int *nent, int maxnent)
878{
879 const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
880 bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
881 bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
882 bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
883 bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
884 bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
885 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
886 bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
887 bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
888 bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
889 const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
890 bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
891 bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
892 bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
893 bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
894 bit(X86_FEATURE_PGE) |
895 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
896 bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
897 bit(X86_FEATURE_SYSCALL) |
898 (bit(X86_FEATURE_NX) && is_efer_nx()) |
899#ifdef CONFIG_X86_64
900 bit(X86_FEATURE_LM) |
901#endif
902 bit(X86_FEATURE_MMXEXT) |
903 bit(X86_FEATURE_3DNOWEXT) |
904 bit(X86_FEATURE_3DNOW);
905 const u32 kvm_supported_word3_x86_features =
906 bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
907 const u32 kvm_supported_word6_x86_features =
908 bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);
909
910 /* all function-2 cpuid_count() calls should be made on the same cpu */
911 get_cpu();
912 do_cpuid_1_ent(entry, function, index);
913 ++*nent;
914
915 switch (function) {
916 case 0:
917 entry->eax = min(entry->eax, (u32)0xb);
918 break;
919 case 1:
920 entry->edx &= kvm_supported_word0_x86_features;
921 entry->ecx &= kvm_supported_word3_x86_features;
922 break;
923 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
924 * may return different values. This forces us to get_cpu() before
925 * issuing the first command, and also to emulate this annoying behavior
926 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
927 case 2: {
928 int t, times = entry->eax & 0xff;
929
930 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
931 for (t = 1; t < times && *nent < maxnent; ++t) {
932 do_cpuid_1_ent(&entry[t], function, 0);
933 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
934 ++*nent;
935 }
936 break;
937 }
938 /* function 4 and 0xb have additional index. */
939 case 4: {
940 int index, cache_type;
941
942 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
943 /* read more entries until cache_type is zero */
944 for (index = 1; *nent < maxnent; ++index) {
945 cache_type = entry[index - 1].eax & 0x1f;
946 if (!cache_type)
947 break;
948 do_cpuid_1_ent(&entry[index], function, index);
949 entry[index].flags |=
950 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
951 ++*nent;
952 }
953 break;
954 }
955 case 0xb: {
956 int index, level_type;
957
958 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
959 /* read more entries until level_type is zero */
960 for (index = 1; *nent < maxnent; ++index) {
961 level_type = entry[index - 1].ecx & 0xff;
962 if (!level_type)
963 break;
964 do_cpuid_1_ent(&entry[index], function, index);
965 entry[index].flags |=
966 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
967 ++*nent;
968 }
969 break;
970 }
971 case 0x80000000:
972 entry->eax = min(entry->eax, 0x8000001a);
973 break;
974 case 0x80000001:
975 entry->edx &= kvm_supported_word1_x86_features;
976 entry->ecx &= kvm_supported_word6_x86_features;
977 break;
978 }
979 put_cpu();
980}
981
982static int kvm_vm_ioctl_get_supported_cpuid(struct kvm *kvm,
983 struct kvm_cpuid2 *cpuid,
984 struct kvm_cpuid_entry2 __user *entries)
985{
986 struct kvm_cpuid_entry2 *cpuid_entries;
987 int limit, nent = 0, r = -E2BIG;
988 u32 func;
989
990 if (cpuid->nent < 1)
991 goto out;
992 r = -ENOMEM;
993 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
994 if (!cpuid_entries)
995 goto out;
996
997 do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
998 limit = cpuid_entries[0].eax;
999 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
1000 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1001 &nent, cpuid->nent);
1002 r = -E2BIG;
1003 if (nent >= cpuid->nent)
1004 goto out_free;
1005
1006 do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
1007 limit = cpuid_entries[nent - 1].eax;
1008 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1009 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1010 &nent, cpuid->nent);
1011 r = -EFAULT;
1012 if (copy_to_user(entries, cpuid_entries,
1013 nent * sizeof(struct kvm_cpuid_entry2)))
1014 goto out_free;
1015 cpuid->nent = nent;
1016 r = 0;
1017
1018out_free:
1019 vfree(cpuid_entries);
1020out:
1021 return r;
1022}
1023
1024static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1025 struct kvm_lapic_state *s)
1026{
1027 vcpu_load(vcpu);
1028 memcpy(s->regs, vcpu->apic->regs, sizeof *s);
1029 vcpu_put(vcpu);
1030
1031 return 0;
1032}
1033
1034static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1035 struct kvm_lapic_state *s)
1036{
1037 vcpu_load(vcpu);
1038 memcpy(vcpu->apic->regs, s->regs, sizeof *s);
1039 kvm_apic_post_state_restore(vcpu);
1040 vcpu_put(vcpu);
1041
1042 return 0;
1043}
1044
1045static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1046 struct kvm_interrupt *irq)
1047{
1048 if (irq->irq < 0 || irq->irq >= 256)
1049 return -EINVAL;
1050 if (irqchip_in_kernel(vcpu->kvm))
1051 return -ENXIO;
1052 vcpu_load(vcpu);
1053
1054 set_bit(irq->irq, vcpu->irq_pending);
1055 set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
1056
1057 vcpu_put(vcpu);
1058
1059 return 0;
1060}
1061
1062long kvm_arch_vcpu_ioctl(struct file *filp,
1063 unsigned int ioctl, unsigned long arg)
1064{
1065 struct kvm_vcpu *vcpu = filp->private_data;
1066 void __user *argp = (void __user *)arg;
1067 int r;
1068
1069 switch (ioctl) {
1070 case KVM_GET_LAPIC: {
1071 struct kvm_lapic_state lapic;
1072
1073 memset(&lapic, 0, sizeof lapic);
1074 r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
1075 if (r)
1076 goto out;
1077 r = -EFAULT;
1078 if (copy_to_user(argp, &lapic, sizeof lapic))
1079 goto out;
1080 r = 0;
1081 break;
1082 }
1083 case KVM_SET_LAPIC: {
1084 struct kvm_lapic_state lapic;
1085
1086 r = -EFAULT;
1087 if (copy_from_user(&lapic, argp, sizeof lapic))
1088 goto out;
1089 r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
1090 if (r)
1091 goto out;
1092 r = 0;
1093 break;
1094 }
1095 case KVM_INTERRUPT: {
1096 struct kvm_interrupt irq;
1097
1098 r = -EFAULT;
1099 if (copy_from_user(&irq, argp, sizeof irq))
1100 goto out;
1101 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1102 if (r)
1103 goto out;
1104 r = 0;
1105 break;
1106 }
1107 case KVM_SET_CPUID: {
1108 struct kvm_cpuid __user *cpuid_arg = argp;
1109 struct kvm_cpuid cpuid;
1110
1111 r = -EFAULT;
1112 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1113 goto out;
1114 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
1115 if (r)
1116 goto out;
1117 break;
1118 }
1119 case KVM_SET_CPUID2: {
1120 struct kvm_cpuid2 __user *cpuid_arg = argp;
1121 struct kvm_cpuid2 cpuid;
1122
1123 r = -EFAULT;
1124 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1125 goto out;
1126 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
1127 cpuid_arg->entries);
1128 if (r)
1129 goto out;
1130 break;
1131 }
1132 case KVM_GET_CPUID2: {
1133 struct kvm_cpuid2 __user *cpuid_arg = argp;
1134 struct kvm_cpuid2 cpuid;
1135
1136 r = -EFAULT;
1137 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1138 goto out;
1139 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
1140 cpuid_arg->entries);
1141 if (r)
1142 goto out;
1143 r = -EFAULT;
1144 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1145 goto out;
1146 r = 0;
1147 break;
1148 }
1149 case KVM_GET_MSRS:
1150 r = msr_io(vcpu, argp, kvm_get_msr, 1);
1151 break;
1152 case KVM_SET_MSRS:
1153 r = msr_io(vcpu, argp, do_set_msr, 0);
1154 break;
1155 default:
1156 r = -EINVAL;
1157 }
1158out:
1159 return r;
1160}
1161
1162static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
1163{
1164 int ret;
1165
1166 if (addr > (unsigned int)(-3 * PAGE_SIZE))
1167 return -1;
1168 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
1169 return ret;
1170}
1171
1172static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1173 u32 kvm_nr_mmu_pages)
1174{
1175 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
1176 return -EINVAL;
1177
1178 mutex_lock(&kvm->lock);
1179
1180 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
1181 kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
1182
1183 mutex_unlock(&kvm->lock);
1184 return 0;
1185}
1186
1187static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1188{
1189 return kvm->n_alloc_mmu_pages;
1190}
1191
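/*
 * Translate a guest frame number through the alias table; frames that
 * fall outside every alias range map to themselves.
 */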
1192gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1193{
1194 int i;
1195 struct kvm_mem_alias *alias;
1196
1197 for (i = 0; i < kvm->naliases; ++i) {
1198 alias = &kvm->aliases[i];
1199 if (gfn >= alias->base_gfn
1200 && gfn < alias->base_gfn + alias->npages)
1201 return alias->target_gfn + gfn - alias->base_gfn;
1202 }
1203 return gfn;
1204}
1205
1206/*
1207 * Set a new alias region. Aliases map a portion of physical memory into
1208 * another portion. This is useful for memory windows, for example the PC
1209 * VGA region.
1210 */
1211static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1212 struct kvm_memory_alias *alias)
1213{
1214 int r, n;
1215 struct kvm_mem_alias *p;
1216
1217 r = -EINVAL;
1218 /* General sanity checks */
1219 if (alias->memory_size & (PAGE_SIZE - 1))
1220 goto out;
1221 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
1222 goto out;
1223 if (alias->slot >= KVM_ALIAS_SLOTS)
1224 goto out;
1225 if (alias->guest_phys_addr + alias->memory_size
1226 < alias->guest_phys_addr)
1227 goto out;
1228 if (alias->target_phys_addr + alias->memory_size
1229 < alias->target_phys_addr)
1230 goto out;
1231
1232 mutex_lock(&kvm->lock);
1233
1234 p = &kvm->aliases[alias->slot];
1235 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
1236 p->npages = alias->memory_size >> PAGE_SHIFT;
1237 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
1238
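 /* naliases becomes one past the highest slot that still has pages. */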
1239 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
1240 if (kvm->aliases[n - 1].npages)
1241 break;
1242 kvm->naliases = n;
1243
1244 kvm_mmu_zap_all(kvm);
1245
1246 mutex_unlock(&kvm->lock);
1247
1248 return 0;
1249
1250out:
1251 return r;
1252}
1253
1254static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1255{
1256 int r;
1257
1258 r = 0;
1259 switch (chip->chip_id) {
1260 case KVM_IRQCHIP_PIC_MASTER:
1261 memcpy(&chip->chip.pic,
1262 &pic_irqchip(kvm)->pics[0],
1263 sizeof(struct kvm_pic_state));
1264 break;
1265 case KVM_IRQCHIP_PIC_SLAVE:
1266 memcpy(&chip->chip.pic,
1267 &pic_irqchip(kvm)->pics[1],
1268 sizeof(struct kvm_pic_state));
1269 break;
1270 case KVM_IRQCHIP_IOAPIC:
1271 memcpy(&chip->chip.ioapic,
1272 ioapic_irqchip(kvm),
1273 sizeof(struct kvm_ioapic_state));
1274 break;
1275 default:
1276 r = -EINVAL;
1277 break;
1278 }
1279 return r;
1280}
1281
1282static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1283{
1284 int r;
1285
1286 r = 0;
1287 switch (chip->chip_id) {
1288 case KVM_IRQCHIP_PIC_MASTER:
1289 memcpy(&pic_irqchip(kvm)->pics[0],
1290 &chip->chip.pic,
1291 sizeof(struct kvm_pic_state));
1292 break;
1293 case KVM_IRQCHIP_PIC_SLAVE:
1294 memcpy(&pic_irqchip(kvm)->pics[1],
1295 &chip->chip.pic,
1296 sizeof(struct kvm_pic_state));
1297 break;
1298 case KVM_IRQCHIP_IOAPIC:
1299 memcpy(ioapic_irqchip(kvm),
1300 &chip->chip.ioapic,
1301 sizeof(struct kvm_ioapic_state));
1302 break;
1303 default:
1304 r = -EINVAL;
1305 break;
1306 }
1307 kvm_pic_update_irq(pic_irqchip(kvm));
1308 return r;
1309}
1310
1311/*
1312 * Get (and clear) the dirty memory log for a memory slot.
1313 */
1314int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1315 struct kvm_dirty_log *log)
1316{
1317 int r;
1318 int n;
1319 struct kvm_memory_slot *memslot;
1320 int is_dirty = 0;
1321
1322 mutex_lock(&kvm->lock);
1323
1324 r = kvm_get_dirty_log(kvm, log, &is_dirty);
1325 if (r)
1326 goto out;
1327
1328 /* If nothing is dirty, don't bother messing with page tables. */
1329 if (is_dirty) {
1330 kvm_mmu_slot_remove_write_access(kvm, log->slot);
1331 kvm_flush_remote_tlbs(kvm);
1332 memslot = &kvm->memslots[log->slot];
1333 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1334 memset(memslot->dirty_bitmap, 0, n);
1335 }
1336 r = 0;
1337out:
1338 mutex_unlock(&kvm->lock);
1339 return r;
1340}
1341
1342long kvm_arch_vm_ioctl(struct file *filp,
1343 unsigned int ioctl, unsigned long arg)
1344{
1345 struct kvm *kvm = filp->private_data;
1346 void __user *argp = (void __user *)arg;
1347 int r = -EINVAL;
1348
1349 switch (ioctl) {
1350 case KVM_SET_TSS_ADDR:
1351 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1352 if (r < 0)
1353 goto out;
1354 break;
1355 case KVM_SET_MEMORY_REGION: {
1356 struct kvm_memory_region kvm_mem;
1357 struct kvm_userspace_memory_region kvm_userspace_mem;
1358
1359 r = -EFAULT;
1360 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1361 goto out;
1362 kvm_userspace_mem.slot = kvm_mem.slot;
1363 kvm_userspace_mem.flags = kvm_mem.flags;
1364 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1365 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1366 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1367 if (r)
1368 goto out;
1369 break;
1370 }
1371 case KVM_SET_NR_MMU_PAGES:
1372 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1373 if (r)
1374 goto out;
1375 break;
1376 case KVM_GET_NR_MMU_PAGES:
1377 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1378 break;
1379 case KVM_SET_MEMORY_ALIAS: {
1380 struct kvm_memory_alias alias;
1381
1382 r = -EFAULT;
1383 if (copy_from_user(&alias, argp, sizeof alias))
1384 goto out;
1385 r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
1386 if (r)
1387 goto out;
1388 break;
1389 }
1390 case KVM_CREATE_IRQCHIP:
1391 r = -ENOMEM;
1392 kvm->vpic = kvm_create_pic(kvm);
1393 if (kvm->vpic) {
1394 r = kvm_ioapic_init(kvm);
1395 if (r) {
1396 kfree(kvm->vpic);
1397 kvm->vpic = NULL;
1398 goto out;
1399 }
1400 } else
1401 goto out;
1402 break;
1403 case KVM_IRQ_LINE: {
1404 struct kvm_irq_level irq_event;
1405
1406 r = -EFAULT;
1407 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1408 goto out;
1409 if (irqchip_in_kernel(kvm)) {
1410 mutex_lock(&kvm->lock);
1411 if (irq_event.irq < 16)
1412 kvm_pic_set_irq(pic_irqchip(kvm),
1413 irq_event.irq,
1414 irq_event.level);
1415 kvm_ioapic_set_irq(kvm->vioapic,
1416 irq_event.irq,
1417 irq_event.level);
1418 mutex_unlock(&kvm->lock);
1419 r = 0;
1420 }
1421 break;
1422 }
1423 case KVM_GET_IRQCHIP: {
1424 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1425 struct kvm_irqchip chip;
1426
1427 r = -EFAULT;
1428 if (copy_from_user(&chip, argp, sizeof chip))
1429 goto out;
1430 r = -ENXIO;
1431 if (!irqchip_in_kernel(kvm))
1432 goto out;
1433 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
1434 if (r)
1435 goto out;
1436 r = -EFAULT;
1437 if (copy_to_user(argp, &chip, sizeof chip))
1438 goto out;
1439 r = 0;
1440 break;
1441 }
1442 case KVM_SET_IRQCHIP: {
1443 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1444 struct kvm_irqchip chip;
1445
1446 r = -EFAULT;
1447 if (copy_from_user(&chip, argp, sizeof chip))
1448 goto out;
1449 r = -ENXIO;
1450 if (!irqchip_in_kernel(kvm))
1451 goto out;
1452 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
1453 if (r)
1454 goto out;
1455 r = 0;
1456 break;
1457 }
1458 case KVM_GET_SUPPORTED_CPUID: {
1459 struct kvm_cpuid2 __user *cpuid_arg = argp;
1460 struct kvm_cpuid2 cpuid;
1461
1462 r = -EFAULT;
1463 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1464 goto out;
1465 r = kvm_vm_ioctl_get_supported_cpuid(kvm, &cpuid,
1466 cpuid_arg->entries);
1467 if (r)
1468 goto out;
1469
1470 r = -EFAULT;
1471 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1472 goto out;
1473 r = 0;
1474 break;
1475 }
1476 default:
1477 ;
1478 }
1479out:
1480 return r;
1481}
1482
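/*
 * Probe each entry of msrs_to_save with rdmsr_safe() and compact the
 * array in place so that it only lists MSRs the host actually has.
 */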
1483static void kvm_init_msr_list(void)
1484{
1485 u32 dummy[2];
1486 unsigned i, j;
1487
1488 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
1489 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
1490 continue;
1491 if (j < i)
1492 msrs_to_save[j] = msrs_to_save[i];
1493 j++;
1494 }
1495 num_msrs_to_save = j;
1496}
1497
1498/*
1499 * Only the apic needs an MMIO device hook, so shortcut now.
1500 */
1501static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
1502 gpa_t addr)
1503{
1504 struct kvm_io_device *dev;
1505
1506 if (vcpu->apic) {
1507 dev = &vcpu->apic->dev;
1508 if (dev->in_range(dev, addr))
1509 return dev;
1510 }
1511 return NULL;
1512}
1513
1514
1515static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1516 gpa_t addr)
1517{
1518 struct kvm_io_device *dev;
1519
1520 dev = vcpu_find_pervcpu_dev(vcpu, addr);
1521 if (dev == NULL)
1522 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
1523 return dev;
1524}
1525
1526int emulator_read_std(unsigned long addr,
1527 void *val,
1528 unsigned int bytes,
1529 struct kvm_vcpu *vcpu)
1530{
1531 void *data = val;
1532
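 /*
 * Copy at most up to the end of the current guest page on each pass:
 * contiguous guest-virtual addresses may map to discontiguous
 * guest-physical pages, so each page is translated separately.
 */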
1533 while (bytes) {
1534 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1535 unsigned offset = addr & (PAGE_SIZE-1);
1536 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
1537 int ret;
1538
1539 if (gpa == UNMAPPED_GVA)
1540 return X86EMUL_PROPAGATE_FAULT;
1541 ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
1542 if (ret < 0)
1543 return X86EMUL_UNHANDLEABLE;
1544
1545 bytes -= tocopy;
1546 data += tocopy;
1547 addr += tocopy;
1548 }
1549
1550 return X86EMUL_CONTINUE;
1551}
1552EXPORT_SYMBOL_GPL(emulator_read_std);
1553
1554static int emulator_read_emulated(unsigned long addr,
1555 void *val,
1556 unsigned int bytes,
1557 struct kvm_vcpu *vcpu)
1558{
1559 struct kvm_io_device *mmio_dev;
1560 gpa_t gpa;
1561
1562 if (vcpu->mmio_read_completed) {
1563 memcpy(val, vcpu->mmio_data, bytes);
1564 vcpu->mmio_read_completed = 0;
1565 return X86EMUL_CONTINUE;
1566 }
1567
1568 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1569
1570 /* For APIC access vmexit */
1571 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
1572 goto mmio;
1573
1574 if (emulator_read_std(addr, val, bytes, vcpu)
1575 == X86EMUL_CONTINUE)
1576 return X86EMUL_CONTINUE;
1577 if (gpa == UNMAPPED_GVA)
1578 return X86EMUL_PROPAGATE_FAULT;
1579
1580mmio:
1581 /*
1582 * Is this MMIO handled locally?
1583 */
1584 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1585 if (mmio_dev) {
1586 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
1587 return X86EMUL_CONTINUE;
1588 }
1589
1590 vcpu->mmio_needed = 1;
1591 vcpu->mmio_phys_addr = gpa;
1592 vcpu->mmio_size = bytes;
1593 vcpu->mmio_is_write = 0;
1594
1595 return X86EMUL_UNHANDLEABLE;
1596}
1597
1598static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1599 const void *val, int bytes)
1600{
1601 int ret;
1602
1603 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
1604 if (ret < 0)
1605 return 0;
1606 kvm_mmu_pte_write(vcpu, gpa, val, bytes);
1607 return 1;
1608}
1609
1610static int emulator_write_emulated_onepage(unsigned long addr,
1611 const void *val,
1612 unsigned int bytes,
1613 struct kvm_vcpu *vcpu)
1614{
1615 struct kvm_io_device *mmio_dev;
1616 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1617
1618 if (gpa == UNMAPPED_GVA) {
1619 kvm_inject_page_fault(vcpu, addr, 2);
1620 return X86EMUL_PROPAGATE_FAULT;
1621 }
1622
1623 /* For APIC access vmexit */
1624 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
1625 goto mmio;
1626
1627 if (emulator_write_phys(vcpu, gpa, val, bytes))
1628 return X86EMUL_CONTINUE;
1629
1630mmio:
1631 /*
1632 * Is this MMIO handled locally?
1633 */
1634 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1635 if (mmio_dev) {
1636 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
1637 return X86EMUL_CONTINUE;
1638 }
1639
1640 vcpu->mmio_needed = 1;
1641 vcpu->mmio_phys_addr = gpa;
1642 vcpu->mmio_size = bytes;
1643 vcpu->mmio_is_write = 1;
1644 memcpy(vcpu->mmio_data, val, bytes);
1645
1646 return X86EMUL_CONTINUE;
1647}
1648
1649int emulator_write_emulated(unsigned long addr,
1650 const void *val,
1651 unsigned int bytes,
1652 struct kvm_vcpu *vcpu)
1653{
1654 /* Crossing a page boundary? */
1655 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
1656 int rc, now;
1657
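 /* -addr & ~PAGE_MASK = bytes left before the next page boundary. */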
1658 now = -addr & ~PAGE_MASK;
1659 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
1660 if (rc != X86EMUL_CONTINUE)
1661 return rc;
1662 addr += now;
1663 val += now;
1664 bytes -= now;
1665 }
1666 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
1667}
1668EXPORT_SYMBOL_GPL(emulator_write_emulated);
1669
1670static int emulator_cmpxchg_emulated(unsigned long addr,
1671 const void *old,
1672 const void *new,
1673 unsigned int bytes,
1674 struct kvm_vcpu *vcpu)
1675{
1676 static int reported;
1677
1678 if (!reported) {
1679 reported = 1;
1680 printk(KERN_WARNING "kvm: emulating exchange as write\n");
1681 }
1682 return emulator_write_emulated(addr, new, bytes, vcpu);
1683}
1684
1685static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
1686{
1687 return kvm_x86_ops->get_segment_base(vcpu, seg);
1688}
1689
1690int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
1691{
1692 return X86EMUL_CONTINUE;
1693}
1694
1695int emulate_clts(struct kvm_vcpu *vcpu)
1696{
1697 kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
1698 return X86EMUL_CONTINUE;
1699}
1700
1701int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
1702{
1703 struct kvm_vcpu *vcpu = ctxt->vcpu;
1704
1705 switch (dr) {
1706 case 0 ... 3:
1707 *dest = kvm_x86_ops->get_dr(vcpu, dr);
1708 return X86EMUL_CONTINUE;
1709 default:
1710 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
1711 return X86EMUL_UNHANDLEABLE;
1712 }
1713}
1714
1715int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
1716{
1717 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
1718 int exception;
1719
1720 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
1721 if (exception) {
1722 /* FIXME: better handling */
1723 return X86EMUL_UNHANDLEABLE;
1724 }
1725 return X86EMUL_CONTINUE;
1726}
1727
1728void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
1729{
1730 static int reported;
1731 u8 opcodes[4];
1732 unsigned long rip = vcpu->rip;
1733 unsigned long rip_linear;
1734
1735 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
1736
1737 if (reported)
1738 return;
1739
1740 emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
1741
1742 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
1743 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
1744 reported = 1;
1745}
1746EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
1747
1748struct x86_emulate_ops emulate_ops = {
1749 .read_std = emulator_read_std,
1750 .read_emulated = emulator_read_emulated,
1751 .write_emulated = emulator_write_emulated,
1752 .cmpxchg_emulated = emulator_cmpxchg_emulated,
1753};
1754
1755int emulate_instruction(struct kvm_vcpu *vcpu,
1756 struct kvm_run *run,
1757 unsigned long cr2,
1758 u16 error_code,
1759 int no_decode)
1760{
1761 int r;
1762
1763 vcpu->mmio_fault_cr2 = cr2;
1764 kvm_x86_ops->cache_regs(vcpu);
1765
1766 vcpu->mmio_is_write = 0;
1767 vcpu->pio.string = 0;
1768
1769 if (!no_decode) {
1770 int cs_db, cs_l;
1771 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1772
1773 vcpu->emulate_ctxt.vcpu = vcpu;
1774 vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
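 /*
 * Pick the emulation mode: EFLAGS.VM selects real/vm86 mode,
 * otherwise CS.L selects 64-bit and CS.D picks 32- vs 16-bit.
 */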
1775 vcpu->emulate_ctxt.mode =
1776 (vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
1777 ? X86EMUL_MODE_REAL : cs_l
1778 ? X86EMUL_MODE_PROT64 : cs_db
1779 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
1780
1781 if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
1782 vcpu->emulate_ctxt.cs_base = 0;
1783 vcpu->emulate_ctxt.ds_base = 0;
1784 vcpu->emulate_ctxt.es_base = 0;
1785 vcpu->emulate_ctxt.ss_base = 0;
1786 } else {
1787 vcpu->emulate_ctxt.cs_base =
1788 get_segment_base(vcpu, VCPU_SREG_CS);
1789 vcpu->emulate_ctxt.ds_base =
1790 get_segment_base(vcpu, VCPU_SREG_DS);
1791 vcpu->emulate_ctxt.es_base =
1792 get_segment_base(vcpu, VCPU_SREG_ES);
1793 vcpu->emulate_ctxt.ss_base =
1794 get_segment_base(vcpu, VCPU_SREG_SS);
1795 }
1796
1797 vcpu->emulate_ctxt.gs_base =
1798 get_segment_base(vcpu, VCPU_SREG_GS);
1799 vcpu->emulate_ctxt.fs_base =
1800 get_segment_base(vcpu, VCPU_SREG_FS);
1801
1802 r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
1803 ++vcpu->stat.insn_emulation;
1804 if (r) {
1805 ++vcpu->stat.insn_emulation_fail;
1806 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
1807 return EMULATE_DONE;
1808 return EMULATE_FAIL;
1809 }
1810 }
1811
1812 r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);
1813
1814 if (vcpu->pio.string)
1815 return EMULATE_DO_MMIO;
1816
1817 if ((r || vcpu->mmio_is_write) && run) {
1818 run->exit_reason = KVM_EXIT_MMIO;
1819 run->mmio.phys_addr = vcpu->mmio_phys_addr;
1820 memcpy(run->mmio.data, vcpu->mmio_data, 8);
1821 run->mmio.len = vcpu->mmio_size;
1822 run->mmio.is_write = vcpu->mmio_is_write;
1823 }
1824
1825 if (r) {
1826 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
1827 return EMULATE_DONE;
1828 if (!vcpu->mmio_needed) {
1829 kvm_report_emulation_failure(vcpu, "mmio");
1830 return EMULATE_FAIL;
1831 }
1832 return EMULATE_DO_MMIO;
1833 }
1834
1835 kvm_x86_ops->decache_regs(vcpu);
1836 kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);
1837
1838 if (vcpu->mmio_is_write) {
1839 vcpu->mmio_needed = 0;
1840 return EMULATE_DO_MMIO;
1841 }
1842
1843 return EMULATE_DONE;
1844}
1845EXPORT_SYMBOL_GPL(emulate_instruction);
1846
1847static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
1848{
1849 int i;
1850
1851 for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
1852 if (vcpu->pio.guest_pages[i]) {
1853 kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
1854 vcpu->pio.guest_pages[i] = NULL;
1855 }
1856}
1857
1858static int pio_copy_data(struct kvm_vcpu *vcpu)
1859{
1860 void *p = vcpu->pio_data;
1861 void *q;
1862 unsigned bytes;
1863 int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
1864
1865 q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
1866 PAGE_KERNEL);
1867 if (!q) {
1868 free_pio_guest_pages(vcpu);
1869 return -ENOMEM;
1870 }
1871 q += vcpu->pio.guest_page_offset;
1872 bytes = vcpu->pio.size * vcpu->pio.cur_count;
1873 if (vcpu->pio.in)
1874 memcpy(q, p, bytes);
1875 else
1876 memcpy(p, q, bytes);
1877 q -= vcpu->pio.guest_page_offset;
1878 vunmap(q);
1879 free_pio_guest_pages(vcpu);
1880 return 0;
1881}
1882
1883int complete_pio(struct kvm_vcpu *vcpu)
1884{
1885 struct kvm_pio_request *io = &vcpu->pio;
1886 long delta;
1887 int r;
1888
1889 kvm_x86_ops->cache_regs(vcpu);
1890
1891 if (!io->string) {
1892 if (io->in)
1893 memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
1894 io->size);
1895 } else {
1896 if (io->in) {
1897 r = pio_copy_data(vcpu);
1898 if (r) {
1899 kvm_x86_ops->cache_regs(vcpu);
1900 return r;
1901 }
1902 }
1903
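 /*
 * For string I/O, wind the guest registers forward: rcx drops by the
 * number of elements completed (for rep), and rsi/rdi advance by the
 * byte count, backwards when 'down' is set.
 */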
1904 delta = 1;
1905 if (io->rep) {
1906 delta *= io->cur_count;
1907 /*
1908 * The size of the register should really depend on
1909 * current address size.
1910 */
1911 vcpu->regs[VCPU_REGS_RCX] -= delta;
1912 }
1913 if (io->down)
1914 delta = -delta;
1915 delta *= io->size;
1916 if (io->in)
1917 vcpu->regs[VCPU_REGS_RDI] += delta;
1918 else
1919 vcpu->regs[VCPU_REGS_RSI] += delta;
1920 }
1921
1922 kvm_x86_ops->decache_regs(vcpu);
1923
1924 io->count -= io->cur_count;
1925 io->cur_count = 0;
1926
1927 return 0;
1928}
1929
1930static void kernel_pio(struct kvm_io_device *pio_dev,
1931 struct kvm_vcpu *vcpu,
1932 void *pd)
1933{
1934 /* TODO: String I/O for in kernel device */
1935
1936 mutex_lock(&vcpu->kvm->lock);
1937 if (vcpu->pio.in)
1938 kvm_iodevice_read(pio_dev, vcpu->pio.port,
1939 vcpu->pio.size,
1940 pd);
1941 else
1942 kvm_iodevice_write(pio_dev, vcpu->pio.port,
1943 vcpu->pio.size,
1944 pd);
1945 mutex_unlock(&vcpu->kvm->lock);
1946}
1947
1948static void pio_string_write(struct kvm_io_device *pio_dev,
1949 struct kvm_vcpu *vcpu)
1950{
1951 struct kvm_pio_request *io = &vcpu->pio;
1952 void *pd = vcpu->pio_data;
1953 int i;
1954
1955 mutex_lock(&vcpu->kvm->lock);
1956 for (i = 0; i < io->cur_count; i++) {
1957 kvm_iodevice_write(pio_dev, io->port,
1958 io->size,
1959 pd);
1960 pd += io->size;
1961 }
1962 mutex_unlock(&vcpu->kvm->lock);
1963}
1964
1965static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
1966 gpa_t addr)
1967{
1968 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
1969}
1970
1971int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1972 int size, unsigned port)
1973{
1974 struct kvm_io_device *pio_dev;
1975
1976 vcpu->run->exit_reason = KVM_EXIT_IO;
1977 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1978 vcpu->run->io.size = vcpu->pio.size = size;
1979 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1980 vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
1981 vcpu->run->io.port = vcpu->pio.port = port;
1982 vcpu->pio.in = in;
1983 vcpu->pio.string = 0;
1984 vcpu->pio.down = 0;
1985 vcpu->pio.guest_page_offset = 0;
1986 vcpu->pio.rep = 0;
1987
1988 kvm_x86_ops->cache_regs(vcpu);
1989 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
1990 kvm_x86_ops->decache_regs(vcpu);
1991
1992 kvm_x86_ops->skip_emulated_instruction(vcpu);
1993
1994 pio_dev = vcpu_find_pio_dev(vcpu, port);
1995 if (pio_dev) {
1996 kernel_pio(pio_dev, vcpu, vcpu->pio_data);
1997 complete_pio(vcpu);
1998 return 1;
1999 }
2000 return 0;
2001}
2002EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2003
2004int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2005 int size, unsigned long count, int down,
2006 gva_t address, int rep, unsigned port)
2007{
2008 unsigned now, in_page;
2009 int i, ret = 0;
2010 int nr_pages = 1;
2011 struct page *page;
2012 struct kvm_io_device *pio_dev;
2013
2014 vcpu->run->exit_reason = KVM_EXIT_IO;
2015 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2016 vcpu->run->io.size = vcpu->pio.size = size;
2017 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2018 vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
2019 vcpu->run->io.port = vcpu->pio.port = port;
2020 vcpu->pio.in = in;
2021 vcpu->pio.string = 1;
2022 vcpu->pio.down = down;
2023 vcpu->pio.guest_page_offset = offset_in_page(address);
2024 vcpu->pio.rep = rep;
2025
2026 if (!count) {
2027 kvm_x86_ops->skip_emulated_instruction(vcpu);
2028 return 1;
2029 }
2030
2031 if (!down)
2032 in_page = PAGE_SIZE - offset_in_page(address);
2033 else
2034 in_page = offset_in_page(address) + size;
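 /* 'now' = number of elements that fit before crossing a page boundary. */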
2035 now = min(count, (unsigned long)in_page / size);
2036 if (!now) {
2037 /*
2038 * String I/O straddles page boundary. Pin two guest pages
2039 * so that we satisfy atomicity constraints. Do just one
2040 * transaction to avoid complexity.
2041 */
2042 nr_pages = 2;
2043 now = 1;
2044 }
2045 if (down) {
2046 /*
2047 * String I/O in reverse. Yuck. Kill the guest, fix later.
2048 */
2049 pr_unimpl(vcpu, "guest string pio down\n");
2050 inject_gp(vcpu);
2051 return 1;
2052 }
2053 vcpu->run->io.count = now;
2054 vcpu->pio.cur_count = now;
2055
2056 if (vcpu->pio.cur_count == vcpu->pio.count)
2057 kvm_x86_ops->skip_emulated_instruction(vcpu);
2058
2059 for (i = 0; i < nr_pages; ++i) {
2060 mutex_lock(&vcpu->kvm->lock);
2061 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
2062 vcpu->pio.guest_pages[i] = page;
2063 mutex_unlock(&vcpu->kvm->lock);
2064 if (!page) {
2065 inject_gp(vcpu);
2066 free_pio_guest_pages(vcpu);
2067 return 1;
2068 }
2069 }
2070
2071 pio_dev = vcpu_find_pio_dev(vcpu, port);
2072 if (!vcpu->pio.in) {
2073 /* string PIO write */
2074 ret = pio_copy_data(vcpu);
2075 if (ret >= 0 && pio_dev) {
2076 pio_string_write(pio_dev, vcpu);
2077 complete_pio(vcpu);
2078 if (vcpu->pio.count == 0)
2079 ret = 1;
2080 }
2081 } else if (pio_dev)
2082 pr_unimpl(vcpu, "no string pio read support yet, "
2083 "port %x size %d count %ld\n",
2084 port, size, count);
2085
2086 return ret;
2087}
2088EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2089
2090int kvm_arch_init(void *opaque)
2091{
2092 int r;
2093 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2094
2095 r = kvm_mmu_module_init();
2096 if (r)
2097 goto out_fail;
2098
2099 kvm_init_msr_list();
2100
2101 if (kvm_x86_ops) {
2102 printk(KERN_ERR "kvm: already loaded the other module\n");
2103 r = -EEXIST;
2104 goto out;
2105 }
2106
2107 if (!ops->cpu_has_kvm_support()) {
2108 printk(KERN_ERR "kvm: no hardware support\n");
2109 r = -EOPNOTSUPP;
2110 goto out;
2111 }
2112 if (ops->disabled_by_bios()) {
2113 printk(KERN_ERR "kvm: disabled by bios\n");
2114 r = -EOPNOTSUPP;
2115 goto out;
2116 }
2117
2118 kvm_x86_ops = ops;
2119 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2120 return 0;
2121
2122out:
2123 kvm_mmu_module_exit();
2124out_fail:
2125 return r;
2126}
2127
2128void kvm_arch_exit(void)
2129{
2130 kvm_x86_ops = NULL;
2131 kvm_mmu_module_exit();
2132}
f8c16bba 2133
2134int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2135{
2136 ++vcpu->stat.halt_exits;
2137 if (irqchip_in_kernel(vcpu->kvm)) {
2138 vcpu->mp_state = VCPU_MP_STATE_HALTED;
2139 kvm_vcpu_block(vcpu);
2140 if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
2141 return -EINTR;
2142 return 1;
2143 } else {
2144 vcpu->run->exit_reason = KVM_EXIT_HLT;
2145 return 0;
2146 }
2147}
2148EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2149
2150int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2151{
2152 unsigned long nr, a0, a1, a2, a3, ret;
2153
2154 kvm_x86_ops->cache_regs(vcpu);
2155
2156 nr = vcpu->regs[VCPU_REGS_RAX];
2157 a0 = vcpu->regs[VCPU_REGS_RBX];
2158 a1 = vcpu->regs[VCPU_REGS_RCX];
2159 a2 = vcpu->regs[VCPU_REGS_RDX];
2160 a3 = vcpu->regs[VCPU_REGS_RSI];
2161
2162 if (!is_long_mode(vcpu)) {
2163 nr &= 0xFFFFFFFF;
2164 a0 &= 0xFFFFFFFF;
2165 a1 &= 0xFFFFFFFF;
2166 a2 &= 0xFFFFFFFF;
2167 a3 &= 0xFFFFFFFF;
2168 }
2169
2170 switch (nr) {
2171 default:
2172 ret = -KVM_ENOSYS;
2173 break;
2174 }
2175 vcpu->regs[VCPU_REGS_RAX] = ret;
2176 kvm_x86_ops->decache_regs(vcpu);
2177 return 0;
2178}
2179EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
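/*
 * Guest-side view of the ABI handled above (a minimal sketch, not code
 * from this file): the hypercall number goes in RAX, up to four arguments
 * in RBX, RCX, RDX and RSI, and the return value comes back in RAX.  The
 * exact opcode is vendor specific and is patched in via ->patch_hypercall()
 * from kvm_fix_hypercall() below; this example hard-codes vmcall.
 *
 *	static inline long example_kvm_hypercall0(unsigned long nr)
 *	{
 *		long ret;
 *
 *		asm volatile(".byte 0x0f, 0x01, 0xc1"
 *			     : "=a"(ret)
 *			     : "a"(nr)
 *			     : "memory");
 *		return ret;
 *	}
 */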
2180
2181int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2182{
2183 char instruction[3];
2184 int ret = 0;
2185
2186 mutex_lock(&vcpu->kvm->lock);
2187
2188 /*
2189 * Blow out the MMU so that no other VCPU has an active mapping;
2190 * this makes the updated hypercall appear atomically across all
2191 * VCPUs.
2192 */
2193 kvm_mmu_zap_all(vcpu->kvm);
2194
2195 kvm_x86_ops->cache_regs(vcpu);
2196 kvm_x86_ops->patch_hypercall(vcpu, instruction);
2197 if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
2198 != X86EMUL_CONTINUE)
2199 ret = -EFAULT;
2200
2201 mutex_unlock(&vcpu->kvm->lock);
2202
2203 return ret;
2204}
2205
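/*
 * Merge a 32-bit value written by the guest into the low half of a 64-bit
 * control register, preserving the upper 32 bits of the current value.
 */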
2206static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2207{
2208 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2209}
2210
2211void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2212{
2213 struct descriptor_table dt = { limit, base };
2214
2215 kvm_x86_ops->set_gdt(vcpu, &dt);
2216}
2217
2218void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2219{
2220 struct descriptor_table dt = { limit, base };
2221
2222 kvm_x86_ops->set_idt(vcpu, &dt);
2223}
2224
2225void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2226 unsigned long *rflags)
2227{
2228 lmsw(vcpu, msw);
2229 *rflags = kvm_x86_ops->get_rflags(vcpu);
2230}
2231
2232unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2233{
2234 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2235 switch (cr) {
2236 case 0:
2237 return vcpu->cr0;
2238 case 2:
2239 return vcpu->cr2;
2240 case 3:
2241 return vcpu->cr3;
2242 case 4:
2243 return vcpu->cr4;
2244 default:
2245 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
2246 return 0;
2247 }
2248}
2249
2250void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2251 unsigned long *rflags)
2252{
2253 switch (cr) {
2254 case 0:
2255 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
2256 *rflags = kvm_x86_ops->get_rflags(vcpu);
2257 break;
2258 case 2:
2259 vcpu->cr2 = val;
2260 break;
2261 case 3:
2262 set_cr3(vcpu, val);
2263 break;
2264 case 4:
2265 set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
2266 break;
2267 default:
2268 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
2269 }
2270}
2271
2272static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2273{
2274 struct kvm_cpuid_entry2 *e = &vcpu->cpuid_entries[i];
2275 int j, nent = vcpu->cpuid_nent;
2276
2277 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2278 /* when no next entry is found, the current entry[i] is reselected */
2279 for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
2280 struct kvm_cpuid_entry2 *ej = &vcpu->cpuid_entries[j];
2281 if (ej->function == e->function) {
2282 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2283 return j;
2284 }
2285 }
2286 return 0; /* silence gcc, even though control never reaches here */
2287}
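/*
 * A "stateful" CPUID function is one whose output changes across
 * successive invocations; Intel's CPUID leaf 2, for instance, historically
 * reported in AL how many times it had to be executed to return all cache
 * descriptors.  Entries of such a function are chained through
 * KVM_CPUID_FLAG_STATE_READ_NEXT so that each guest CPUID execution picks
 * up the next entry in turn.
 */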
2288
2289/* find an entry with matching function, matching index (if needed), and that
2290 * should be read next (if it's stateful) */
2291static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2292 u32 function, u32 index)
2293{
2294 if (e->function != function)
2295 return 0;
2296 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2297 return 0;
2298 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2299 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2300 return 0;
2301 return 1;
2302}
2303
2304void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2305{
2306 int i;
2307 u32 function, index;
2308 struct kvm_cpuid_entry2 *e, *best;
2309
2310 kvm_x86_ops->cache_regs(vcpu);
2311 function = vcpu->regs[VCPU_REGS_RAX];
07716717 2312 index = vcpu->regs[VCPU_REGS_RCX];
2313 vcpu->regs[VCPU_REGS_RAX] = 0;
2314 vcpu->regs[VCPU_REGS_RBX] = 0;
2315 vcpu->regs[VCPU_REGS_RCX] = 0;
2316 vcpu->regs[VCPU_REGS_RDX] = 0;
2317 best = NULL;
2318 for (i = 0; i < vcpu->cpuid_nent; ++i) {
2319 e = &vcpu->cpuid_entries[i];
2320 if (is_matching_cpuid_entry(e, function, index)) {
2321 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2322 move_to_next_stateful_cpuid_entry(vcpu, i);
2323 best = e;
2324 break;
2325 }
2326 /*
2327 * Both basic or both extended?
2328 */
2329 if (((e->function ^ function) & 0x80000000) == 0)
2330 if (!best || e->function > best->function)
2331 best = e;
2332 }
2333 if (best) {
2334 vcpu->regs[VCPU_REGS_RAX] = best->eax;
2335 vcpu->regs[VCPU_REGS_RBX] = best->ebx;
2336 vcpu->regs[VCPU_REGS_RCX] = best->ecx;
2337 vcpu->regs[VCPU_REGS_RDX] = best->edx;
2338 }
2339 kvm_x86_ops->decache_regs(vcpu);
2340 kvm_x86_ops->skip_emulated_instruction(vcpu);
2341}
2342EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
d0752060 2343
2344/*
2345 * Check if userspace requested an interrupt window, and that the
2346 * interrupt window is open.
2347 *
2348 * No need to exit to userspace if we already have an interrupt queued.
2349 */
2350static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2351 struct kvm_run *kvm_run)
2352{
2353 return (!vcpu->irq_summary &&
2354 kvm_run->request_interrupt_window &&
2355 vcpu->interrupt_window_open &&
2356 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2357}
2358
2359static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2360 struct kvm_run *kvm_run)
2361{
2362 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2363 kvm_run->cr8 = get_cr8(vcpu);
2364 kvm_run->apic_base = kvm_get_apic_base(vcpu);
2365 if (irqchip_in_kernel(vcpu->kvm))
2366 kvm_run->ready_for_interrupt_injection = 1;
2367 else
2368 kvm_run->ready_for_interrupt_injection =
2369 (vcpu->interrupt_window_open &&
2370 vcpu->irq_summary == 0);
2371}
2372
2373static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2374{
2375 int r;
2376
2377 if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
2378 pr_debug("vcpu %d received sipi with vector # %x\n",
2379 vcpu->vcpu_id, vcpu->sipi_vector);
2380 kvm_lapic_reset(vcpu);
2381 r = kvm_x86_ops->vcpu_reset(vcpu);
2382 if (r)
2383 return r;
2384 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
2385 }
2386
2387preempted:
2388 if (vcpu->guest_debug.enabled)
2389 kvm_x86_ops->guest_debug_pre(vcpu);
2390
2391again:
2392 r = kvm_mmu_reload(vcpu);
2393 if (unlikely(r))
2394 goto out;
2395
2396 kvm_inject_pending_timer_irqs(vcpu);
2397
2398 preempt_disable();
2399
2400 kvm_x86_ops->prepare_guest_switch(vcpu);
2401 kvm_load_guest_fpu(vcpu);
2402
2403 local_irq_disable();
2404
2405 if (signal_pending(current)) {
2406 local_irq_enable();
2407 preempt_enable();
2408 r = -EINTR;
2409 kvm_run->exit_reason = KVM_EXIT_INTR;
2410 ++vcpu->stat.signal_exits;
2411 goto out;
2412 }
2413
2414 if (vcpu->exception.pending)
2415 __queue_exception(vcpu);
2416 else if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 2417 kvm_x86_ops->inject_pending_irq(vcpu);
eb9774f0 2418 else
2419 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
2420
2421 vcpu->guest_mode = 1;
2422 kvm_guest_enter();
2423
2424 if (vcpu->requests)
2425 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2426 kvm_x86_ops->tlb_flush(vcpu);
2427
2428 kvm_x86_ops->run(vcpu, kvm_run);
2429
2430 vcpu->guest_mode = 0;
2431 local_irq_enable();
2432
2433 ++vcpu->stat.exits;
2434
2435 /*
2436 * We must have an instruction between local_irq_enable() and
2437 * kvm_guest_exit(), so the timer interrupt isn't delayed by
2438 * the interrupt shadow. The stat.exits increment will do nicely.
2439 * But we need to prevent reordering, hence this barrier():
2440 */
2441 barrier();
2442
2443 kvm_guest_exit();
2444
2445 preempt_enable();
2446
2447 /*
2448 * Profile KVM exit RIPs:
2449 */
2450 if (unlikely(prof_on == KVM_PROFILING)) {
2451 kvm_x86_ops->cache_regs(vcpu);
2452 profile_hit(KVM_PROFILING, (void *)vcpu->rip);
2453 }
2454
2455 if (vcpu->exception.pending && kvm_x86_ops->exception_injected(vcpu))
2456 vcpu->exception.pending = false;
2457
2458 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2459
2460 if (r > 0) {
2461 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2462 r = -EINTR;
2463 kvm_run->exit_reason = KVM_EXIT_INTR;
2464 ++vcpu->stat.request_irq_exits;
2465 goto out;
2466 }
e1beb1d3 2467 if (!need_resched())
b6c7a5dc 2468 goto again;
2469 }
2470
2471out:
2472 if (r > 0) {
2473 kvm_resched(vcpu);
2474 goto preempted;
2475 }
2476
2477 post_kvm_run_save(vcpu, kvm_run);
2478
2479 return r;
2480}
2481
2482int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2483{
2484 int r;
2485 sigset_t sigsaved;
2486
2487 vcpu_load(vcpu);
2488
2489 if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
2490 kvm_vcpu_block(vcpu);
2491 vcpu_put(vcpu);
2492 return -EAGAIN;
2493 }
2494
2495 if (vcpu->sigset_active)
2496 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2497
2498 /* re-sync apic's tpr */
2499 if (!irqchip_in_kernel(vcpu->kvm))
2500 set_cr8(vcpu, kvm_run->cr8);
2501
2502 if (vcpu->pio.cur_count) {
2503 r = complete_pio(vcpu);
2504 if (r)
2505 goto out;
2506 }
2507#ifdef CONFIG_HAS_IOMEM
2508 if (vcpu->mmio_needed) {
2509 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2510 vcpu->mmio_read_completed = 1;
2511 vcpu->mmio_needed = 0;
2512 r = emulate_instruction(vcpu, kvm_run,
2513 vcpu->mmio_fault_cr2, 0, 1);
2514 if (r == EMULATE_DO_MMIO) {
2515 /*
2516 * Read-modify-write. Back to userspace.
2517 */
2518 r = 0;
2519 goto out;
2520 }
2521 }
2522#endif
2523 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
2524 kvm_x86_ops->cache_regs(vcpu);
2525 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
2526 kvm_x86_ops->decache_regs(vcpu);
2527 }
2528
2529 r = __vcpu_run(vcpu, kvm_run);
2530
2531out:
2532 if (vcpu->sigset_active)
2533 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2534
2535 vcpu_put(vcpu);
2536 return r;
2537}
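/*
 * Userspace side of the run loop above (a rough sketch of the standard
 * /dev/kvm usage; kvm_fd, vcpu_fd, handle_pio() and handle_halt() are
 * placeholders, not code from this file): the VMM mmap()s the vcpu fd to
 * reach the shared struct kvm_run, calls KVM_RUN and dispatches on
 * exit_reason.  When KVM_RUN is interrupted by a signal it fails with
 * EINTR and exit_reason is KVM_EXIT_INTR, so the loop simply retries.
 * String PIO data is exposed at KVM_PIO_PAGE_OFFSET * PAGE_SIZE within the
 * same mapping, as set up by kvm_emulate_pio_string().
 *
 *	size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_IO:	handle_pio(run);	break;
 *		case KVM_EXIT_HLT:	handle_halt();		break;
 *		case KVM_EXIT_INTR:	break;
 *		}
 *	}
 */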
2538
2539int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2540{
2541 vcpu_load(vcpu);
2542
2543 kvm_x86_ops->cache_regs(vcpu);
2544
2545 regs->rax = vcpu->regs[VCPU_REGS_RAX];
2546 regs->rbx = vcpu->regs[VCPU_REGS_RBX];
2547 regs->rcx = vcpu->regs[VCPU_REGS_RCX];
2548 regs->rdx = vcpu->regs[VCPU_REGS_RDX];
2549 regs->rsi = vcpu->regs[VCPU_REGS_RSI];
2550 regs->rdi = vcpu->regs[VCPU_REGS_RDI];
2551 regs->rsp = vcpu->regs[VCPU_REGS_RSP];
2552 regs->rbp = vcpu->regs[VCPU_REGS_RBP];
2553#ifdef CONFIG_X86_64
2554 regs->r8 = vcpu->regs[VCPU_REGS_R8];
2555 regs->r9 = vcpu->regs[VCPU_REGS_R9];
2556 regs->r10 = vcpu->regs[VCPU_REGS_R10];
2557 regs->r11 = vcpu->regs[VCPU_REGS_R11];
2558 regs->r12 = vcpu->regs[VCPU_REGS_R12];
2559 regs->r13 = vcpu->regs[VCPU_REGS_R13];
2560 regs->r14 = vcpu->regs[VCPU_REGS_R14];
2561 regs->r15 = vcpu->regs[VCPU_REGS_R15];
2562#endif
2563
2564 regs->rip = vcpu->rip;
2565 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
2566
2567 /*
2568 * Don't leak debug flags in case they were set for guest debugging
2569 */
2570 if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
2571 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
2572
2573 vcpu_put(vcpu);
2574
2575 return 0;
2576}
2577
2578int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2579{
2580 vcpu_load(vcpu);
2581
2582 vcpu->regs[VCPU_REGS_RAX] = regs->rax;
2583 vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
2584 vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
2585 vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
2586 vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
2587 vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
2588 vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
2589 vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
2590#ifdef CONFIG_X86_64
2591 vcpu->regs[VCPU_REGS_R8] = regs->r8;
2592 vcpu->regs[VCPU_REGS_R9] = regs->r9;
2593 vcpu->regs[VCPU_REGS_R10] = regs->r10;
2594 vcpu->regs[VCPU_REGS_R11] = regs->r11;
2595 vcpu->regs[VCPU_REGS_R12] = regs->r12;
2596 vcpu->regs[VCPU_REGS_R13] = regs->r13;
2597 vcpu->regs[VCPU_REGS_R14] = regs->r14;
2598 vcpu->regs[VCPU_REGS_R15] = regs->r15;
2599#endif
2600
2601 vcpu->rip = regs->rip;
2602 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
2603
2604 kvm_x86_ops->decache_regs(vcpu);
2605
2606 vcpu_put(vcpu);
2607
2608 return 0;
2609}
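/*
 * The two handlers above back the KVM_GET_REGS and KVM_SET_REGS vcpu
 * ioctls.  A typical userspace read-modify-write looks like the sketch
 * below (vcpu_fd and entry_point are placeholders); RFLAGS bit 1 is
 * reserved and always set, hence the value 0x2.
 *
 *	struct kvm_regs regs;
 *
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.rip = entry_point;
 *	regs.rflags = 0x2;
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */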
2610
2611static void get_segment(struct kvm_vcpu *vcpu,
2612 struct kvm_segment *var, int seg)
2613{
2614 return kvm_x86_ops->get_segment(vcpu, var, seg);
2615}
2616
2617void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
2618{
2619 struct kvm_segment cs;
2620
2621 get_segment(vcpu, &cs, VCPU_SREG_CS);
2622 *db = cs.db;
2623 *l = cs.l;
2624}
2625EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
2626
2627int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2628 struct kvm_sregs *sregs)
2629{
2630 struct descriptor_table dt;
2631 int pending_vec;
2632
2633 vcpu_load(vcpu);
2634
2635 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2636 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2637 get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2638 get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2639 get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2640 get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2641
2642 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2643 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2644
2645 kvm_x86_ops->get_idt(vcpu, &dt);
2646 sregs->idt.limit = dt.limit;
2647 sregs->idt.base = dt.base;
2648 kvm_x86_ops->get_gdt(vcpu, &dt);
2649 sregs->gdt.limit = dt.limit;
2650 sregs->gdt.base = dt.base;
2651
2652 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2653 sregs->cr0 = vcpu->cr0;
2654 sregs->cr2 = vcpu->cr2;
2655 sregs->cr3 = vcpu->cr3;
2656 sregs->cr4 = vcpu->cr4;
2657 sregs->cr8 = get_cr8(vcpu);
2658 sregs->efer = vcpu->shadow_efer;
2659 sregs->apic_base = kvm_get_apic_base(vcpu);
2660
2661 if (irqchip_in_kernel(vcpu->kvm)) {
2662 memset(sregs->interrupt_bitmap, 0,
2663 sizeof sregs->interrupt_bitmap);
2664 pending_vec = kvm_x86_ops->get_irq(vcpu);
2665 if (pending_vec >= 0)
2666 set_bit(pending_vec,
2667 (unsigned long *)sregs->interrupt_bitmap);
2668 } else
2669 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2670 sizeof sregs->interrupt_bitmap);
2671
2672 vcpu_put(vcpu);
2673
2674 return 0;
2675}
2676
2677static void set_segment(struct kvm_vcpu *vcpu,
2678 struct kvm_segment *var, int seg)
2679{
2680 return kvm_x86_ops->set_segment(vcpu, var, seg);
2681}
2682
2683int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2684 struct kvm_sregs *sregs)
2685{
2686 int mmu_reset_needed = 0;
2687 int i, pending_vec, max_bits;
2688 struct descriptor_table dt;
2689
2690 vcpu_load(vcpu);
2691
2692 dt.limit = sregs->idt.limit;
2693 dt.base = sregs->idt.base;
2694 kvm_x86_ops->set_idt(vcpu, &dt);
2695 dt.limit = sregs->gdt.limit;
2696 dt.base = sregs->gdt.base;
2697 kvm_x86_ops->set_gdt(vcpu, &dt);
2698
2699 vcpu->cr2 = sregs->cr2;
2700 mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
2701 vcpu->cr3 = sregs->cr3;
2702
2703 set_cr8(vcpu, sregs->cr8);
2704
2705 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
2706#ifdef CONFIG_X86_64
2707 kvm_x86_ops->set_efer(vcpu, sregs->efer);
2708#endif
2709 kvm_set_apic_base(vcpu, sregs->apic_base);
2710
2711 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2712
2713 mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
2714 vcpu->cr0 = sregs->cr0;
2715 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
2716
2717 mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
2718 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2719 if (!is_long_mode(vcpu) && is_pae(vcpu))
2720 load_pdptrs(vcpu, vcpu->cr3);
2721
2722 if (mmu_reset_needed)
2723 kvm_mmu_reset_context(vcpu);
2724
2725 if (!irqchip_in_kernel(vcpu->kvm)) {
2726 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
2727 sizeof vcpu->irq_pending);
2728 vcpu->irq_summary = 0;
2729 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
2730 if (vcpu->irq_pending[i])
2731 __set_bit(i, &vcpu->irq_summary);
2732 } else {
2733 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
2734 pending_vec = find_first_bit(
2735 (const unsigned long *)sregs->interrupt_bitmap,
2736 max_bits);
2737 /* Only a pending external irq is handled here */
2738 if (pending_vec < max_bits) {
2739 kvm_x86_ops->set_irq(vcpu, pending_vec);
2740 pr_debug("Set back pending irq %d\n",
2741 pending_vec);
2742 }
2743 }
2744
2745 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2746 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2747 set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2748 set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2749 set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2750 set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2751
2752 set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2753 set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2754
2755 vcpu_put(vcpu);
2756
2757 return 0;
2758}
2759
2760int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
2761 struct kvm_debug_guest *dbg)
2762{
2763 int r;
2764
2765 vcpu_load(vcpu);
2766
2767 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
2768
2769 vcpu_put(vcpu);
2770
2771 return r;
2772}
2773
2774/*
2775 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
2776 * we have asm/x86/processor.h
2777 */
2778struct fxsave {
2779 u16 cwd;
2780 u16 swd;
2781 u16 twd;
2782 u16 fop;
2783 u64 rip;
2784 u64 rdp;
2785 u32 mxcsr;
2786 u32 mxcsr_mask;
2787 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
2788#ifdef CONFIG_X86_64
2789 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
2790#else
2791 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
2792#endif
2793};
2794
2795/*
2796 * Translate a guest virtual address to a guest physical address.
2797 */
2798int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2799 struct kvm_translation *tr)
2800{
2801 unsigned long vaddr = tr->linear_address;
2802 gpa_t gpa;
2803
2804 vcpu_load(vcpu);
2805 mutex_lock(&vcpu->kvm->lock);
2806 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
2807 tr->physical_address = gpa;
2808 tr->valid = gpa != UNMAPPED_GVA;
2809 tr->writeable = 1;
2810 tr->usermode = 0;
2811 mutex_unlock(&vcpu->kvm->lock);
2812 vcpu_put(vcpu);
2813
2814 return 0;
2815}
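/*
 * This handler backs the KVM_TRANSLATE vcpu ioctl.  Userspace usage
 * (sketch only; vcpu_fd is a placeholder):
 *
 *	struct kvm_translation tr = { .linear_address = gva };
 *
 *	ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
 *	if (tr.valid)
 *		gpa = tr.physical_address;
 */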
2816
2817int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2818{
2819 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2820
2821 vcpu_load(vcpu);
2822
2823 memcpy(fpu->fpr, fxsave->st_space, 128);
2824 fpu->fcw = fxsave->cwd;
2825 fpu->fsw = fxsave->swd;
2826 fpu->ftwx = fxsave->twd;
2827 fpu->last_opcode = fxsave->fop;
2828 fpu->last_ip = fxsave->rip;
2829 fpu->last_dp = fxsave->rdp;
2830 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
2831
2832 vcpu_put(vcpu);
2833
2834 return 0;
2835}
2836
2837int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2838{
2839 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2840
2841 vcpu_load(vcpu);
2842
2843 memcpy(fxsave->st_space, fpu->fpr, 128);
2844 fxsave->cwd = fpu->fcw;
2845 fxsave->swd = fpu->fsw;
2846 fxsave->twd = fpu->ftwx;
2847 fxsave->fop = fpu->last_opcode;
2848 fxsave->rip = fpu->last_ip;
2849 fxsave->rdp = fpu->last_dp;
2850 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
2851
2852 vcpu_put(vcpu);
2853
2854 return 0;
2855}
2856
2857void fx_init(struct kvm_vcpu *vcpu)
2858{
2859 unsigned after_mxcsr_mask;
2860
2861 /* Initialize guest FPU by resetting ours and saving into guest's */
2862 preempt_disable();
2863 fx_save(&vcpu->host_fx_image);
2864 fpu_init();
2865 fx_save(&vcpu->guest_fx_image);
2866 fx_restore(&vcpu->host_fx_image);
2867 preempt_enable();
2868
2869 vcpu->cr0 |= X86_CR0_ET;
2870 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
2871 vcpu->guest_fx_image.mxcsr = 0x1f80;	/* power-on default MXCSR value */
2872 memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
2873 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
2874}
2875EXPORT_SYMBOL_GPL(fx_init);
2876
2877void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
2878{
2879 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
2880 return;
2881
2882 vcpu->guest_fpu_loaded = 1;
2883 fx_save(&vcpu->host_fx_image);
2884 fx_restore(&vcpu->guest_fx_image);
2885}
2886EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
2887
2888void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
2889{
2890 if (!vcpu->guest_fpu_loaded)
2891 return;
2892
2893 vcpu->guest_fpu_loaded = 0;
2894 fx_save(&vcpu->guest_fx_image);
2895 fx_restore(&vcpu->host_fx_image);
f096ed85 2896 ++vcpu->stat.fpu_reload;
2897}
2898EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
2899
2900void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
2901{
2902 kvm_x86_ops->vcpu_free(vcpu);
2903}
2904
2905struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2906 unsigned int id)
2907{
2908 return kvm_x86_ops->vcpu_create(kvm, id);
2909}
e9b11c17 2910
2911int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2912{
2913 int r;
2914
2915 /* We do fxsave: this must be aligned. */
2916 BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
2917
2918 vcpu_load(vcpu);
2919 r = kvm_arch_vcpu_reset(vcpu);
2920 if (r == 0)
2921 r = kvm_mmu_setup(vcpu);
2922 vcpu_put(vcpu);
2923 if (r < 0)
2924 goto free_vcpu;
2925
26e5215f 2926 return 0;
2927free_vcpu:
2928 kvm_x86_ops->vcpu_free(vcpu);
26e5215f 2929 return r;
2930}
2931
d40ccc62 2932void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2933{
2934 vcpu_load(vcpu);
2935 kvm_mmu_unload(vcpu);
2936 vcpu_put(vcpu);
2937
2938 kvm_x86_ops->vcpu_free(vcpu);
2939}
2940
2941int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
2942{
2943 return kvm_x86_ops->vcpu_reset(vcpu);
2944}
2945
2946void kvm_arch_hardware_enable(void *garbage)
2947{
2948 kvm_x86_ops->hardware_enable(garbage);
2949}
2950
2951void kvm_arch_hardware_disable(void *garbage)
2952{
2953 kvm_x86_ops->hardware_disable(garbage);
2954}
2955
2956int kvm_arch_hardware_setup(void)
2957{
2958 return kvm_x86_ops->hardware_setup();
2959}
2960
2961void kvm_arch_hardware_unsetup(void)
2962{
2963 kvm_x86_ops->hardware_unsetup();
2964}
2965
2966void kvm_arch_check_processor_compat(void *rtn)
2967{
2968 kvm_x86_ops->check_processor_compatibility(rtn);
2969}
2970
2971int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2972{
2973 struct page *page;
2974 struct kvm *kvm;
2975 int r;
2976
2977 BUG_ON(vcpu->kvm == NULL);
2978 kvm = vcpu->kvm;
2979
2980 vcpu->mmu.root_hpa = INVALID_PAGE;
2981 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
2982 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
2983 else
2984 vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
2985
2986 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2987 if (!page) {
2988 r = -ENOMEM;
2989 goto fail;
2990 }
2991 vcpu->pio_data = page_address(page);
2992
2993 r = kvm_mmu_create(vcpu);
2994 if (r < 0)
2995 goto fail_free_pio_data;
2996
2997 if (irqchip_in_kernel(kvm)) {
2998 r = kvm_create_lapic(vcpu);
2999 if (r < 0)
3000 goto fail_mmu_destroy;
3001 }
3002
3003 return 0;
3004
3005fail_mmu_destroy:
3006 kvm_mmu_destroy(vcpu);
3007fail_free_pio_data:
3008 free_page((unsigned long)vcpu->pio_data);
3009fail:
3010 return r;
3011}
3012
3013void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
3014{
3015 kvm_free_lapic(vcpu);
3016 kvm_mmu_destroy(vcpu);
3017 free_page((unsigned long)vcpu->pio_data);
3018}
3019
3020struct kvm *kvm_arch_create_vm(void)
3021{
3022 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
3023
3024 if (!kvm)
3025 return ERR_PTR(-ENOMEM);
3026
3027 INIT_LIST_HEAD(&kvm->active_mmu_pages);
3028
3029 return kvm;
3030}
3031
3032static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
3033{
3034 vcpu_load(vcpu);
3035 kvm_mmu_unload(vcpu);
3036 vcpu_put(vcpu);
3037}
3038
3039static void kvm_free_vcpus(struct kvm *kvm)
3040{
3041 unsigned int i;
3042
3043 /*
3044 * Unpin any mmu pages first.
3045 */
3046 for (i = 0; i < KVM_MAX_VCPUS; ++i)
3047 if (kvm->vcpus[i])
3048 kvm_unload_vcpu_mmu(kvm->vcpus[i]);
3049 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
3050 if (kvm->vcpus[i]) {
3051 kvm_arch_vcpu_free(kvm->vcpus[i]);
3052 kvm->vcpus[i] = NULL;
3053 }
3054 }
3055
3056}
3057
3058void kvm_arch_destroy_vm(struct kvm *kvm)
3059{
3060 kfree(kvm->vpic);
3061 kfree(kvm->vioapic);
3062 kvm_free_vcpus(kvm);
3063 kvm_free_physmem(kvm);
3064 kfree(kvm);
3065}
3066
3067int kvm_arch_set_memory_region(struct kvm *kvm,
3068 struct kvm_userspace_memory_region *mem,
3069 struct kvm_memory_slot old,
3070 int user_alloc)
3071{
3072 int npages = mem->memory_size >> PAGE_SHIFT;
3073 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
3074
3075 /* To keep backward compatibility with older userspace,
3076 * x86 needs to handle the !user_alloc case.
3077 */
3078 if (!user_alloc) {
3079 if (npages && !old.rmap) {
3080 down_write(&current->mm->mmap_sem);
3081 memslot->userspace_addr = do_mmap(NULL, 0,
3082 npages * PAGE_SIZE,
3083 PROT_READ | PROT_WRITE,
3084 MAP_SHARED | MAP_ANONYMOUS,
3085 0);
3086 up_write(&current->mm->mmap_sem);
3087
3088 if (IS_ERR((void *)memslot->userspace_addr))
3089 return PTR_ERR((void *)memslot->userspace_addr);
3090 } else {
3091 if (!old.user_alloc && old.rmap) {
3092 int ret;
3093
3094 down_write(&current->mm->mmap_sem);
3095 ret = do_munmap(current->mm, old.userspace_addr,
3096 old.npages * PAGE_SIZE);
3097 up_write(&current->mm->mmap_sem);
3098 if (ret < 0)
3099 printk(KERN_WARNING
3100 "kvm_vm_ioctl_set_memory_region: "
3101 "failed to munmap memory\n");
3102 }
3103 }
3104 }
3105
3106 if (!kvm->n_requested_mmu_pages) {
3107 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
3108 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
3109 }
3110
3111 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
3112 kvm_flush_remote_tlbs(kvm);
3113
3114 return 0;
3115}
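/*
 * kvm_arch_set_memory_region() is reached from the generic
 * KVM_SET_USER_MEMORY_REGION ioctl path.  In the user_alloc case userspace
 * supplies the backing memory itself (sketch only; vm_fd and size are
 * placeholders):
 *
 *	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = size,
 *		.userspace_addr = (unsigned long)mem,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */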