/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

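/*
 * Illustrative note (not in the original source): a legacy x86 segment
 * descriptor scatters its 32-bit base address across three fields,
 * which segment_base() above reassembles:
 *
 *   base = base0          (bits  0..15)
 *        | base1 << 16    (bits 16..23)
 *        | base2 << 24    (bits 24..31)
 *
 * On x86_64, system descriptors such as the LDT (type 2) and TSS
 * (types 9/11) are 16 bytes wide and carry bits 32..63 in base3,
 * hence the extra "<< 32" merge under CONFIG_X86_64.
 */
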
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;

	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
					" double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_pte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

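/*
 * Worked example (illustrative, not in the original source) for the
 * offset computation in load_pdptrs() above: in PAE mode, CR3 bits
 * 5..11 locate a 32-byte-aligned, 4-entry PDPT within the page.  For
 * cr3 = 0x12345060:
 *
 *   cr3 & (PAGE_SIZE-1) = 0x060      byte offset inside the page
 *   0x060 >> 5          = 3          index of the 32-byte slot
 *   3 << 2              = 12         index of the PDPT's first u64
 *
 * so the guest read starts at byte 12 * sizeof(u64) = 0x60 and covers
 * the four 8-byte pdptes.
 */
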
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}

	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = vcpu->arch.cr4;
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);


/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

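/*
 * Note (added for clarity; the protocol matches the version comment in
 * kvm_write_guest_time() below): the version field acts like a
 * seqlock.  It is bumped to an odd value before the structure is
 * rewritten and to an even value afterwards, so a guest that reads
 * wc and then re-reads version can detect and retry a torn update.
 */
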
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

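/*
 * Worked example (illustrative, not in the original source): div_frac()
 * returns the fraction dividend/divisor in 0.32 fixed point.  divl
 * divides the 64-bit value edx:eax (edx = dividend, eax = 0) by the
 * divisor, so the quotient only fits in 32 bits when
 * dividend < divisor -- which kvm_set_time_scale() below guarantees.
 *
 *   div_frac(1, 2)                  = 0x80000000  (0.5)
 *   div_frac(250000000, 1000000000) = 0x40000000  (0.25)
 */
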
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if (!vcpu->time_page)
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

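/*
 * Layout note (added for clarity): the variable-range MTRR MSRs come
 * in base/mask pairs starting at 0x200 -- MTRRphysBase0/MTRRphysMask0,
 * MTRRphysBase1/MTRRphysMask1, and so on.  That is why set_msr_mtrr()
 * and get_msr_mtrr() below decode them as
 *
 *   idx          = (msr - 0x200) / 2;    which variable range
 *   is_mtrr_mask = (msr - 0x200) & 1;    0 = base, 1 = mask
 *
 * e.g. msr 0x203 is the mask register of variable range 1.
 */
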
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_MC0_MISC+20:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_VM_HSAVE_PA:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_ASSIGN_DEV_IRQ:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

#define F(x) bit(X86_FEATURE_##x)

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_lm = F(LM);
#else
	unsigned f_lm = 0;
#endif

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | 0 /* GBPAGES */ | 0 /* RDTSCP */ |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
		0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | 0 /* x2APIC */ | F(MOVBE) | F(POPCNT) |
		0 /* Reserved, XSAVE, OSXSAVE */;
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
		0 /* SKINIT */ | 0 /* WDT */;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word4_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

#undef F

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

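/*
 * Flow sketch (added for clarity): KVM_GET_SUPPORTED_CPUID enumerates
 * two ranges.  Leaf 0 is queried first and its eax gives the highest
 * basic leaf, so leaves 0..limit are filled in; the same is then done
 * for the extended range starting at 0x80000000.  Each leaf passes
 * through do_cpuid_ent(), which masks the feature words against what
 * KVM can actually virtualize, so userspace sees the host CPUID minus
 * the unsupported bits.
 */
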
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_inject_nmi(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_lapic_state *lapic = NULL;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = -EFAULT;
		if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	};
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	kfree(lapic);
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

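/*
 * Worked example (illustrative, hypothetical values): an alias with
 * base_gfn = 0xa0, npages = 0x20 and target_gfn = 0x800 redirects a
 * VGA-style window covering gfn 0xa0..0xbf, so unalias_gfn(kvm, 0xa5)
 * returns 0x800 + (0xa5 - 0xa0) = 0x805.  A gfn outside every alias
 * is returned unchanged.
 */
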
/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	spin_unlock(&kvm->mmu_lock);
	kvm_mmu_zap_all(kvm);

	up_write(&kvm->slots_lock);

	return 0;

out:
	return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
		       &chip->chip.ioapic,
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count);
	return r;
}

static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{
	if (!kvm->arch.vpit)
		return -ENXIO;
	kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	down_write(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	up_write(&kvm->slots_lock);
	return r;
}

1fe779f8
CO
1819long kvm_arch_vm_ioctl(struct file *filp,
1820 unsigned int ioctl, unsigned long arg)
1821{
1822 struct kvm *kvm = filp->private_data;
1823 void __user *argp = (void __user *)arg;
1824 int r = -EINVAL;
f0d66275
DH
1825 /*
1826 * This union makes it completely explicit to gcc-3.x
1827 * that these two variables' stack usage should be
1828 * combined, not added together.
1829 */
1830 union {
1831 struct kvm_pit_state ps;
1832 struct kvm_memory_alias alias;
1833 } u;
1fe779f8
CO
1834
1835 switch (ioctl) {
1836 case KVM_SET_TSS_ADDR:
1837 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1838 if (r < 0)
1839 goto out;
1840 break;
1841 case KVM_SET_MEMORY_REGION: {
1842 struct kvm_memory_region kvm_mem;
1843 struct kvm_userspace_memory_region kvm_userspace_mem;
1844
1845 r = -EFAULT;
1846 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1847 goto out;
1848 kvm_userspace_mem.slot = kvm_mem.slot;
1849 kvm_userspace_mem.flags = kvm_mem.flags;
1850 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1851 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1852 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1853 if (r)
1854 goto out;
1855 break;
1856 }
1857 case KVM_SET_NR_MMU_PAGES:
1858 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1859 if (r)
1860 goto out;
1861 break;
1862 case KVM_GET_NR_MMU_PAGES:
1863 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1864 break;
f0d66275 1865 case KVM_SET_MEMORY_ALIAS:
1fe779f8 1866 r = -EFAULT;
f0d66275 1867 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
1fe779f8 1868 goto out;
f0d66275 1869 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
1fe779f8
CO
1870 if (r)
1871 goto out;
1872 break;
1fe779f8
CO
1873 case KVM_CREATE_IRQCHIP:
1874 r = -ENOMEM;
d7deeeb0
ZX
1875 kvm->arch.vpic = kvm_create_pic(kvm);
1876 if (kvm->arch.vpic) {
1fe779f8
CO
1877 r = kvm_ioapic_init(kvm);
1878 if (r) {
d7deeeb0
ZX
1879 kfree(kvm->arch.vpic);
1880 kvm->arch.vpic = NULL;
1fe779f8
CO
1881 goto out;
1882 }
1883 } else
1884 goto out;
399ec807
AK
1885 r = kvm_setup_default_irq_routing(kvm);
1886 if (r) {
1887 kfree(kvm->arch.vpic);
1888 kfree(kvm->arch.vioapic);
1889 goto out;
1890 }
1891 break;
1892 case KVM_CREATE_PIT:
1893 mutex_lock(&kvm->lock);
1894 r = -EEXIST;
1895 if (kvm->arch.vpit)
1896 goto create_pit_unlock;
1897 r = -ENOMEM;
1898 kvm->arch.vpit = kvm_create_pit(kvm);
1899 if (kvm->arch.vpit)
1900 r = 0;
1901 create_pit_unlock:
1902 mutex_unlock(&kvm->lock);
1903 break;
1904 case KVM_IRQ_LINE_STATUS:
1905 case KVM_IRQ_LINE: {
1906 struct kvm_irq_level irq_event;
1907
1908 r = -EFAULT;
1909 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1910 goto out;
1911 if (irqchip_in_kernel(kvm)) {
1912 __s32 status;
1913 mutex_lock(&kvm->lock);
1914 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1915 irq_event.irq, irq_event.level);
1916 mutex_unlock(&kvm->lock);
1917 if (ioctl == KVM_IRQ_LINE_STATUS) {
1918 irq_event.status = status;
1919 if (copy_to_user(argp, &irq_event,
1920 sizeof irq_event))
1921 goto out;
1922 }
1923 r = 0;
1924 }
1925 break;
1926 }
1927 case KVM_GET_IRQCHIP: {
1928 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1929 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1930
1931 r = -ENOMEM;
1932 if (!chip)
1933 goto out;
1934 r = -EFAULT;
1935 if (copy_from_user(chip, argp, sizeof *chip))
1936 goto get_irqchip_out;
1937 r = -ENXIO;
1938 if (!irqchip_in_kernel(kvm))
1939 goto get_irqchip_out;
1940 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1941 if (r)
1942 goto get_irqchip_out;
1943 r = -EFAULT;
1944 if (copy_to_user(argp, chip, sizeof *chip))
1945 goto get_irqchip_out;
1946 r = 0;
1947 get_irqchip_out:
1948 kfree(chip);
1949 if (r)
1950 goto out;
1951 break;
1952 }
1953 case KVM_SET_IRQCHIP: {
1954 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1955 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1956
1957 r = -ENOMEM;
1958 if (!chip)
1959 goto out;
1960 r = -EFAULT;
1961 if (copy_from_user(chip, argp, sizeof *chip))
1962 goto set_irqchip_out;
1963 r = -ENXIO;
1964 if (!irqchip_in_kernel(kvm))
1965 goto set_irqchip_out;
1966 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
1967 if (r)
1968 goto set_irqchip_out;
1969 r = 0;
1970 set_irqchip_out:
1971 kfree(chip);
1972 if (r)
1973 goto out;
1974 break;
1975 }
1976 case KVM_GET_PIT: {
1977 r = -EFAULT;
1978 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
1979 goto out;
1980 r = -ENXIO;
1981 if (!kvm->arch.vpit)
1982 goto out;
1983 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
1984 if (r)
1985 goto out;
1986 r = -EFAULT;
1987 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
1988 goto out;
1989 r = 0;
1990 break;
1991 }
1992 case KVM_SET_PIT: {
1993 r = -EFAULT;
1994 if (copy_from_user(&u.ps, argp, sizeof u.ps))
1995 goto out;
1996 r = -ENXIO;
1997 if (!kvm->arch.vpit)
1998 goto out;
1999 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
2000 if (r)
2001 goto out;
2002 r = 0;
2003 break;
2004 }
2005 case KVM_REINJECT_CONTROL: {
2006 struct kvm_reinject_control control;
2007 r = -EFAULT;
2008 if (copy_from_user(&control, argp, sizeof(control)))
2009 goto out;
2010 r = kvm_vm_ioctl_reinject(kvm, &control);
2011 if (r)
2012 goto out;
2013 r = 0;
2014 break;
2015 }
2016 default:
2017 ;
2018 }
2019out:
2020 return r;
2021}
2022
2023static void kvm_init_msr_list(void)
2024{
2025 u32 dummy[2];
2026 unsigned i, j;
2027
2028 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2029 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2030 continue;
2031 if (j < i)
2032 msrs_to_save[j] = msrs_to_save[i];
2033 j++;
2034 }
2035 num_msrs_to_save = j;
2036}
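/*
 * The loop above is the usual in-place compaction idiom (hypothetical
 * example): rdmsr_safe() probes each MSR on the host and faulting entries
 * are dropped by letting j lag behind i, so msrs_to_save = {A, B, C} with
 * B unreadable becomes {A, C} and num_msrs_to_save = 2.
 */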
2037
2038/*
2039 * Only the APIC needs an MMIO device hook, so take the shortcut for now.
2040 */
2041static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
2042 gpa_t addr, int len,
2043 int is_write)
2044{
2045 struct kvm_io_device *dev;
2046
2047 if (vcpu->arch.apic) {
2048 dev = &vcpu->arch.apic->dev;
2049 if (dev->in_range(dev, addr, len, is_write))
2050 return dev;
2051 }
2052 return NULL;
2053}
2054
2055
2056static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
2057 gpa_t addr, int len,
2058 int is_write)
2059{
2060 struct kvm_io_device *dev;
2061
2062 dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
2063 if (dev == NULL)
2064 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
2065 is_write);
2066 return dev;
2067}
2068
2069static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2070 struct kvm_vcpu *vcpu)
2071{
2072 void *data = val;
2073 int r = X86EMUL_CONTINUE;
2074
2075 while (bytes) {
2076 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2077 unsigned offset = addr & (PAGE_SIZE-1);
2078 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2079 int ret;
2080
2081 if (gpa == UNMAPPED_GVA) {
2082 r = X86EMUL_PROPAGATE_FAULT;
2083 goto out;
2084 }
2085 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
2086 if (ret < 0) {
2087 r = X86EMUL_UNHANDLEABLE;
2088 goto out;
2089 }
2090
2091 bytes -= toread;
2092 data += toread;
2093 addr += toread;
2094 }
2095out:
2096 return r;
2097}
2098
2099static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2100 struct kvm_vcpu *vcpu)
77c2002e
IE
2101{
2102 void *data = val;
2103 int r = X86EMUL_CONTINUE;
2104
2105 while (bytes) {
2106 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2107 unsigned offset = addr & (PAGE_SIZE-1);
2108 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2109 int ret;
2110
2111 if (gpa == UNMAPPED_GVA) {
2112 r = X86EMUL_PROPAGATE_FAULT;
2113 goto out;
2114 }
2115 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2116 if (ret < 0) {
2117 r = X86EMUL_UNHANDLEABLE;
2118 goto out;
2119 }
2120
2121 bytes -= towrite;
2122 data += towrite;
2123 addr += towrite;
2124 }
2125out:
2126 return r;
2127}
2128
2129
2130static int emulator_read_emulated(unsigned long addr,
2131 void *val,
2132 unsigned int bytes,
2133 struct kvm_vcpu *vcpu)
2134{
2135 struct kvm_io_device *mmio_dev;
2136 gpa_t gpa;
2137
2138 if (vcpu->mmio_read_completed) {
2139 memcpy(val, vcpu->mmio_data, bytes);
2140 vcpu->mmio_read_completed = 0;
2141 return X86EMUL_CONTINUE;
2142 }
2143
2144 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2145
2146 /* For APIC access vmexit */
2147 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2148 goto mmio;
2149
2150 if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2151 == X86EMUL_CONTINUE)
2152 return X86EMUL_CONTINUE;
2153 if (gpa == UNMAPPED_GVA)
2154 return X86EMUL_PROPAGATE_FAULT;
2155
2156mmio:
2157 /*
2158 * Is this MMIO handled locally?
2159 */
2160 mutex_lock(&vcpu->kvm->lock);
2161 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
2162 if (mmio_dev) {
2163 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
2164 mutex_unlock(&vcpu->kvm->lock);
2165 return X86EMUL_CONTINUE;
2166 }
2167 mutex_unlock(&vcpu->kvm->lock);
2168
2169 vcpu->mmio_needed = 1;
2170 vcpu->mmio_phys_addr = gpa;
2171 vcpu->mmio_size = bytes;
2172 vcpu->mmio_is_write = 0;
2173
2174 return X86EMUL_UNHANDLEABLE;
2175}
2176
2177int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
2178 const void *val, int bytes)
2179{
2180 int ret;
2181
2182 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
2183 if (ret < 0)
2184 return 0;
2185 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
2186 return 1;
2187}
2188
2189static int emulator_write_emulated_onepage(unsigned long addr,
2190 const void *val,
2191 unsigned int bytes,
2192 struct kvm_vcpu *vcpu)
2193{
2194 struct kvm_io_device *mmio_dev;
2195 gpa_t gpa;
2196
2197 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2198
2199 if (gpa == UNMAPPED_GVA) {
2200 kvm_inject_page_fault(vcpu, addr, 2);
2201 return X86EMUL_PROPAGATE_FAULT;
2202 }
2203
2204 /* For APIC access vmexit */
2205 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2206 goto mmio;
2207
2208 if (emulator_write_phys(vcpu, gpa, val, bytes))
2209 return X86EMUL_CONTINUE;
2210
2211mmio:
2212 /*
2213 * Is this MMIO handled locally?
2214 */
2215 mutex_lock(&vcpu->kvm->lock);
2216 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
2217 if (mmio_dev) {
2218 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
2219 mutex_unlock(&vcpu->kvm->lock);
2220 return X86EMUL_CONTINUE;
2221 }
2222 mutex_unlock(&vcpu->kvm->lock);
2223
2224 vcpu->mmio_needed = 1;
2225 vcpu->mmio_phys_addr = gpa;
2226 vcpu->mmio_size = bytes;
2227 vcpu->mmio_is_write = 1;
2228 memcpy(vcpu->mmio_data, val, bytes);
2229
2230 return X86EMUL_CONTINUE;
2231}
2232
2233int emulator_write_emulated(unsigned long addr,
2234 const void *val,
2235 unsigned int bytes,
2236 struct kvm_vcpu *vcpu)
2237{
2238 /* Crossing a page boundary? */
2239 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2240 int rc, now;
2241
2242 now = -addr & ~PAGE_MASK;
2243 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2244 if (rc != X86EMUL_CONTINUE)
2245 return rc;
2246 addr += now;
2247 val += now;
2248 bytes -= now;
2249 }
2250 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2251}
2252EXPORT_SYMBOL_GPL(emulator_write_emulated);
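/*
 * The split above relies on now = -addr & ~PAGE_MASK being the distance
 * to the next page boundary. Hypothetical example: addr = 0x2ffe gives
 * now = 2, so a 4-byte write straddling the boundary is emulated as two
 * bytes on each page, each translated separately.
 */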
2253
2254static int emulator_cmpxchg_emulated(unsigned long addr,
2255 const void *old,
2256 const void *new,
2257 unsigned int bytes,
2258 struct kvm_vcpu *vcpu)
2259{
2260 static int reported;
2261
2262 if (!reported) {
2263 reported = 1;
2264 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2265 }
2266#ifndef CONFIG_X86_64
2267 /* the guest's cmpxchg8b has to be emulated atomically */
2268 if (bytes == 8) {
2269 gpa_t gpa;
2270 struct page *page;
2271 char *kaddr;
2272 u64 val;
2273
2274 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2275
2276 if (gpa == UNMAPPED_GVA ||
2277 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2278 goto emul_write;
2279
2280 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2281 goto emul_write;
2282
2283 val = *(u64 *)new;
2284
2285 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2286
2287 kaddr = kmap_atomic(page, KM_USER0);
2288 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2289 kunmap_atomic(kaddr, KM_USER0);
2290 kvm_release_page_dirty(page);
2291 }
2292emul_write:
2293#endif
2294
2295 return emulator_write_emulated(addr, new, bytes, vcpu);
2296}
2297
2298static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2299{
2300 return kvm_x86_ops->get_segment_base(vcpu, seg);
2301}
2302
2303int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2304{
2305 kvm_mmu_invlpg(vcpu, address);
2306 return X86EMUL_CONTINUE;
2307}
2308
2309int emulate_clts(struct kvm_vcpu *vcpu)
2310{
2311 KVMTRACE_0D(CLTS, vcpu, handler);
2312 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
2313 return X86EMUL_CONTINUE;
2314}
2315
2316int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2317{
2318 struct kvm_vcpu *vcpu = ctxt->vcpu;
2319
2320 switch (dr) {
2321 case 0 ... 3:
2322 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2323 return X86EMUL_CONTINUE;
2324 default:
2325 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
2326 return X86EMUL_UNHANDLEABLE;
2327 }
2328}
2329
2330int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2331{
2332 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2333 int exception;
2334
2335 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2336 if (exception) {
2337 /* FIXME: better handling */
2338 return X86EMUL_UNHANDLEABLE;
2339 }
2340 return X86EMUL_CONTINUE;
2341}
2342
2343void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2344{
2345 u8 opcodes[4];
2346 unsigned long rip = kvm_rip_read(vcpu);
2347 unsigned long rip_linear;
2348
2349 if (!printk_ratelimit())
2350 return;
2351
2352 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2353
2354 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
2355
2356 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2357 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
2358}
2359EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2360
2361static struct x86_emulate_ops emulate_ops = {
2362 .read_std = kvm_read_guest_virt,
2363 .read_emulated = emulator_read_emulated,
2364 .write_emulated = emulator_write_emulated,
2365 .cmpxchg_emulated = emulator_cmpxchg_emulated,
2366};
2367
2368static void cache_all_regs(struct kvm_vcpu *vcpu)
2369{
2370 kvm_register_read(vcpu, VCPU_REGS_RAX);
2371 kvm_register_read(vcpu, VCPU_REGS_RSP);
2372 kvm_register_read(vcpu, VCPU_REGS_RIP);
2373 vcpu->arch.regs_dirty = ~0;
2374}
2375
2376int emulate_instruction(struct kvm_vcpu *vcpu,
2377 struct kvm_run *run,
2378 unsigned long cr2,
2379 u16 error_code,
2380 int emulation_type)
2381{
2382 int r;
2383 struct decode_cache *c;
2384
2385 kvm_clear_exception_queue(vcpu);
2386 vcpu->arch.mmio_fault_cr2 = cr2;
2387 /*
2388 * TODO: fix x86_emulate.c to use guest_read/write_register
2389 * instead of direct ->regs accesses; this can save hundreds of
2390 * cycles on Intel for instructions that don't read/change RSP,
2391 * for example.
2392 */
2393 cache_all_regs(vcpu);
2394
2395 vcpu->mmio_is_write = 0;
2396 vcpu->arch.pio.string = 0;
2397
2398 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
2399 int cs_db, cs_l;
2400 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2401
2402 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2403 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2404 vcpu->arch.emulate_ctxt.mode =
2405 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
2406 ? X86EMUL_MODE_REAL : cs_l
2407 ? X86EMUL_MODE_PROT64 : cs_db
2408 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2409
2410 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2411
2412 /* Reject instructions other than VMCALL/VMMCALL when
2413 * trying to emulate an invalid opcode */
2414 c = &vcpu->arch.emulate_ctxt.decode;
2415 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2416 (!(c->twobyte && c->b == 0x01 &&
2417 (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2418 c->modrm_mod == 3 && c->modrm_rm == 1)))
2419 return EMULATE_FAIL;
2420
2421 ++vcpu->stat.insn_emulation;
2422 if (r) {
2423 ++vcpu->stat.insn_emulation_fail;
2424 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2425 return EMULATE_DONE;
2426 return EMULATE_FAIL;
2427 }
2428 }
2429
2430 if (emulation_type & EMULTYPE_SKIP) {
2431 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
2432 return EMULATE_DONE;
2433 }
2434
2435 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2436
2437 if (vcpu->arch.pio.string)
2438 return EMULATE_DO_MMIO;
2439
2440 if ((r || vcpu->mmio_is_write) && run) {
2441 run->exit_reason = KVM_EXIT_MMIO;
2442 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2443 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2444 run->mmio.len = vcpu->mmio_size;
2445 run->mmio.is_write = vcpu->mmio_is_write;
2446 }
2447
2448 if (r) {
2449 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2450 return EMULATE_DONE;
2451 if (!vcpu->mmio_needed) {
2452 kvm_report_emulation_failure(vcpu, "mmio");
2453 return EMULATE_FAIL;
2454 }
2455 return EMULATE_DO_MMIO;
2456 }
2457
2458 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
2459
2460 if (vcpu->mmio_is_write) {
2461 vcpu->mmio_needed = 0;
2462 return EMULATE_DO_MMIO;
2463 }
2464
2465 return EMULATE_DONE;
2466}
2467EXPORT_SYMBOL_GPL(emulate_instruction);
2468
2469static int pio_copy_data(struct kvm_vcpu *vcpu)
2470{
2471 void *p = vcpu->arch.pio_data;
2472 gva_t q = vcpu->arch.pio.guest_gva;
2473 unsigned bytes;
2474 int ret;
2475
2476 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2477 if (vcpu->arch.pio.in)
2478 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
2479 else
2480 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2481 return ret;
2482}
2483
2484int complete_pio(struct kvm_vcpu *vcpu)
2485{
2486 struct kvm_pio_request *io = &vcpu->arch.pio;
2487 long delta;
2488 int r;
2489 unsigned long val;
2490
2491 if (!io->string) {
2492 if (io->in) {
2493 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2494 memcpy(&val, vcpu->arch.pio_data, io->size);
2495 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2496 }
2497 } else {
2498 if (io->in) {
2499 r = pio_copy_data(vcpu);
2500 if (r)
2501 return r;
2502 }
2503
2504 delta = 1;
2505 if (io->rep) {
2506 delta *= io->cur_count;
2507 /*
2508 * The size of the register should really depend on
2509 * current address size.
2510 */
2511 val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2512 val -= delta;
2513 kvm_register_write(vcpu, VCPU_REGS_RCX, val);
2514 }
2515 if (io->down)
2516 delta = -delta;
2517 delta *= io->size;
2518 if (io->in) {
2519 val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2520 val += delta;
2521 kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2522 } else {
2523 val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2524 val += delta;
2525 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2526 }
2527 }
2528
2529 io->count -= io->cur_count;
2530 io->cur_count = 0;
2531
2532 return 0;
2533}
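/*
 * Example of the string-I/O register fixup above (hypothetical values):
 * after a 2-byte "rep insw" with cur_count = 3, delta = 1 * 3 = 3 is
 * subtracted from RCX; delta *= io->size then gives 6, which is added to
 * RDI (or subtracted, had the direction flag made io->down true).
 */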
2534
2535static void kernel_pio(struct kvm_io_device *pio_dev,
2536 struct kvm_vcpu *vcpu,
2537 void *pd)
2538{
2539 /* TODO: String I/O for in kernel device */
2540
2541 mutex_lock(&vcpu->kvm->lock);
2542 if (vcpu->arch.pio.in)
2543 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2544 vcpu->arch.pio.size,
2545 pd);
2546 else
2547 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2548 vcpu->arch.pio.size,
2549 pd);
2550 mutex_unlock(&vcpu->kvm->lock);
2551}
2552
2553static void pio_string_write(struct kvm_io_device *pio_dev,
2554 struct kvm_vcpu *vcpu)
2555{
2556 struct kvm_pio_request *io = &vcpu->arch.pio;
2557 void *pd = vcpu->arch.pio_data;
2558 int i;
2559
2560 mutex_lock(&vcpu->kvm->lock);
2561 for (i = 0; i < io->cur_count; i++) {
2562 kvm_iodevice_write(pio_dev, io->port,
2563 io->size,
2564 pd);
2565 pd += io->size;
2566 }
2567 mutex_unlock(&vcpu->kvm->lock);
2568}
2569
2570static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2571 gpa_t addr, int len,
2572 int is_write)
2573{
2574 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
2575}
2576
2577int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2578 int size, unsigned port)
2579{
2580 struct kvm_io_device *pio_dev;
2581 unsigned long val;
2582
2583 vcpu->run->exit_reason = KVM_EXIT_IO;
2584 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2585 vcpu->run->io.size = vcpu->arch.pio.size = size;
2586 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2587 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2588 vcpu->run->io.port = vcpu->arch.pio.port = port;
2589 vcpu->arch.pio.in = in;
2590 vcpu->arch.pio.string = 0;
2591 vcpu->arch.pio.down = 0;
2592 vcpu->arch.pio.rep = 0;
2593
2594 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2595 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2596 handler);
2597 else
2598 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2599 handler);
2600
2601 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2602 memcpy(vcpu->arch.pio_data, &val, 4);
2603
2604 pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
2605 if (pio_dev) {
2606 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
2607 complete_pio(vcpu);
2608 return 1;
2609 }
2610 return 0;
2611}
2612EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2613
2614int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2615 int size, unsigned long count, int down,
2616 gva_t address, int rep, unsigned port)
2617{
2618 unsigned now, in_page;
2619 int ret = 0;
2620 struct kvm_io_device *pio_dev;
2621
2622 vcpu->run->exit_reason = KVM_EXIT_IO;
2623 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2624 vcpu->run->io.size = vcpu->arch.pio.size = size;
2625 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2626 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2627 vcpu->run->io.port = vcpu->arch.pio.port = port;
2628 vcpu->arch.pio.in = in;
2629 vcpu->arch.pio.string = 1;
2630 vcpu->arch.pio.down = down;
2631 vcpu->arch.pio.rep = rep;
2632
2633 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2634 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2635 handler);
2636 else
2637 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2638 handler);
2639
2640 if (!count) {
2641 kvm_x86_ops->skip_emulated_instruction(vcpu);
2642 return 1;
2643 }
2644
2645 if (!down)
2646 in_page = PAGE_SIZE - offset_in_page(address);
2647 else
2648 in_page = offset_in_page(address) + size;
2649 now = min(count, (unsigned long)in_page / size);
2650 if (!now)
2651 now = 1;
2652 if (down) {
2653 /*
2654 * String I/O in reverse. Yuck. Kill the guest, fix later.
2655 */
2656 pr_unimpl(vcpu, "guest string pio down\n");
2657 kvm_inject_gp(vcpu, 0);
2658 return 1;
2659 }
2660 vcpu->run->io.count = now;
2661 vcpu->arch.pio.cur_count = now;
2662
2663 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
2664 kvm_x86_ops->skip_emulated_instruction(vcpu);
2665
2666 vcpu->arch.pio.guest_gva = address;
2667
2668 pio_dev = vcpu_find_pio_dev(vcpu, port,
2669 vcpu->arch.pio.cur_count,
2670 !vcpu->arch.pio.in);
2671 if (!vcpu->arch.pio.in) {
2672 /* string PIO write */
2673 ret = pio_copy_data(vcpu);
2674 if (ret == X86EMUL_PROPAGATE_FAULT) {
2675 kvm_inject_gp(vcpu, 0);
2676 return 1;
2677 }
2678 if (ret == 0 && pio_dev) {
2679 pio_string_write(pio_dev, vcpu);
2680 complete_pio(vcpu);
2681 if (vcpu->arch.pio.count == 0)
2682 ret = 1;
2683 }
2684 } else if (pio_dev)
2685 pr_unimpl(vcpu, "no string pio read support yet, "
2686 "port %x size %d count %ld\n",
2687 port, size, count);
2688
2689 return ret;
2690}
2691EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2692
2693static void bounce_off(void *info)
2694{
2695 /* nothing */
2696}
2697
2698static unsigned int ref_freq;
2699static unsigned long tsc_khz_ref;
2700
2701static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
2702 void *data)
2703{
2704 struct cpufreq_freqs *freq = data;
2705 struct kvm *kvm;
2706 struct kvm_vcpu *vcpu;
2707 int i, send_ipi = 0;
2708
2709 if (!ref_freq)
2710 ref_freq = freq->old;
2711
2712 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
2713 return 0;
2714 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
2715 return 0;
2716 per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
2717
2718 spin_lock(&kvm_lock);
2719 list_for_each_entry(kvm, &vm_list, vm_list) {
2720 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2721 vcpu = kvm->vcpus[i];
2722 if (!vcpu)
2723 continue;
2724 if (vcpu->cpu != freq->cpu)
2725 continue;
2726 if (!kvm_request_guest_time_update(vcpu))
2727 continue;
2728 if (vcpu->cpu != smp_processor_id())
2729 send_ipi++;
2730 }
2731 }
2732 spin_unlock(&kvm_lock);
2733
2734 if (freq->old < freq->new && send_ipi) {
2735 /*
2736 * We upscale the frequency. We must make sure the guest
2737 * doesn't see old kvmclock values while running with
2738 * the new frequency; otherwise we risk the guest seeing
2739 * time go backwards.
2740 *
2741 * In case we update the frequency for another cpu
2742 * (which might be in guest context) send an interrupt
2743 * to kick the cpu out of guest context. Next time
2744 * guest context is entered kvmclock will be updated,
2745 * so the guest will not see stale values.
2746 */
2747 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
2748 }
2749 return 0;
2750}
2751
2752static struct notifier_block kvmclock_cpufreq_notifier_block = {
2753 .notifier_call = kvmclock_cpufreq_notifier
2754};
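/*
 * Sketch of the scaling done by the notifier above (illustrative
 * numbers): cpufreq_scale(tsc_khz_ref, ref_freq, freq->new) computes
 * tsc_khz_ref * freq->new / ref_freq, so stepping a 2000000 kHz
 * reference down to 1000000 kHz halves the per-cpu cpu_tsc_khz that
 * kvmclock later uses to convert TSC ticks into nanoseconds.
 */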
2755
2756int kvm_arch_init(void *opaque)
2757{
2758 int r, cpu;
2759 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2760
2761 if (kvm_x86_ops) {
2762 printk(KERN_ERR "kvm: already loaded the other module\n");
2763 r = -EEXIST;
2764 goto out;
2765 }
2766
2767 if (!ops->cpu_has_kvm_support()) {
2768 printk(KERN_ERR "kvm: no hardware support\n");
2769 r = -EOPNOTSUPP;
2770 goto out;
2771 }
2772 if (ops->disabled_by_bios()) {
2773 printk(KERN_ERR "kvm: disabled by bios\n");
2774 r = -EOPNOTSUPP;
2775 goto out;
2776 }
2777
2778 r = kvm_mmu_module_init();
2779 if (r)
2780 goto out;
2781
2782 kvm_init_msr_list();
2783
2784 kvm_x86_ops = ops;
2785 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2786 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2787 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2788 PT_DIRTY_MASK, PT64_NX_MASK, 0);
2789
2790 for_each_possible_cpu(cpu)
2791 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
2792 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
2793 tsc_khz_ref = tsc_khz;
2794 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
2795 CPUFREQ_TRANSITION_NOTIFIER);
2796 }
2797
2798 return 0;
2799
2800out:
2801 return r;
2802}
2803
2804void kvm_arch_exit(void)
2805{
2806 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
2807 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
2808 CPUFREQ_TRANSITION_NOTIFIER);
2809 kvm_x86_ops = NULL;
2810 kvm_mmu_module_exit();
2811}
2812
2813int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2814{
2815 ++vcpu->stat.halt_exits;
2816 KVMTRACE_0D(HLT, vcpu, handler);
2817 if (irqchip_in_kernel(vcpu->kvm)) {
2818 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
2819 return 1;
2820 } else {
2821 vcpu->run->exit_reason = KVM_EXIT_HLT;
2822 return 0;
2823 }
2824}
2825EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2826
2827static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2828 unsigned long a1)
2829{
2830 if (is_long_mode(vcpu))
2831 return a0;
2832 else
2833 return a0 | ((gpa_t)a1 << 32);
2834}
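/*
 * Illustration with hypothetical values: a 32-bit guest splits a 64-bit
 * GPA across two registers, so a0 = 0x1000 and a1 = 0x2 combine to
 * hc_gpa() = 0x1000 | (0x2ULL << 32) = 0x200001000, while a long-mode
 * guest passes the whole address in a0.
 */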
2835
2836int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2837{
2838 unsigned long nr, a0, a1, a2, a3, ret;
2839 int r = 1;
2840
2841 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
2842 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
2843 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
2844 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
2845 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
2846
2847 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2848
2849 if (!is_long_mode(vcpu)) {
2850 nr &= 0xFFFFFFFF;
2851 a0 &= 0xFFFFFFFF;
2852 a1 &= 0xFFFFFFFF;
2853 a2 &= 0xFFFFFFFF;
2854 a3 &= 0xFFFFFFFF;
2855 }
2856
2857 switch (nr) {
2858 case KVM_HC_VAPIC_POLL_IRQ:
2859 ret = 0;
2860 break;
2861 case KVM_HC_MMU_OP:
2862 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2863 break;
2864 default:
2865 ret = -KVM_ENOSYS;
2866 break;
2867 }
2868 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
2869 ++vcpu->stat.hypercalls;
2870 return r;
2871}
2872EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
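/*
 * Guest-side sketch of the hypercall ABI consumed above (illustrative;
 * "vmcall" is the Intel encoding, AMD guests use "vmmcall"): the number
 * goes in RAX, arguments in RBX/RCX/RDX/RSI, and the result comes back
 * in RAX.
 *
 *	static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
 *	{
 *		long ret;
 *		asm volatile("vmcall"
 *			     : "=a"(ret)
 *			     : "a"(nr), "b"(p1)
 *			     : "memory");
 *		return ret;
 *	}
 */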
2873
2874int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2875{
2876 char instruction[3];
2877 int ret = 0;
2878 unsigned long rip = kvm_rip_read(vcpu);
2879
2880
2881 /*
2882 * Blow out the MMU to ensure that no other VCPU has an active mapping
2883 * to ensure that the updated hypercall appears atomically across all
2884 * VCPUs.
2885 */
2886 kvm_mmu_zap_all(vcpu->kvm);
2887
2888 kvm_x86_ops->patch_hypercall(vcpu, instruction);
2889 if (emulator_write_emulated(rip, instruction, 3, vcpu)
2890 != X86EMUL_CONTINUE)
2891 ret = -EFAULT;
2892
2893 return ret;
2894}
2895
2896static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2897{
2898 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2899}
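/*
 * Worked example for mk_cr_64() (hypothetical values): with
 * curr_cr = 0xffffffff00000031 and new_val = 0x80000011 the low 32 bits
 * are replaced, giving 0xffffffff80000011; the real-mode helpers below
 * only ever supply 32 bits, so the high half must be preserved.
 */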
2900
2901void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2902{
2903 struct descriptor_table dt = { limit, base };
2904
2905 kvm_x86_ops->set_gdt(vcpu, &dt);
2906}
2907
2908void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2909{
2910 struct descriptor_table dt = { limit, base };
2911
2912 kvm_x86_ops->set_idt(vcpu, &dt);
2913}
2914
2915void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2916 unsigned long *rflags)
2917{
2918 kvm_lmsw(vcpu, msw);
2919 *rflags = kvm_x86_ops->get_rflags(vcpu);
2920}
2921
2922unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2923{
2924 unsigned long value;
2925
2926 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2927 switch (cr) {
2928 case 0:
2929 value = vcpu->arch.cr0;
2930 break;
2931 case 2:
2932 value = vcpu->arch.cr2;
2933 break;
2934 case 3:
2935 value = vcpu->arch.cr3;
2936 break;
2937 case 4:
2938 value = vcpu->arch.cr4;
2939 break;
2940 case 8:
2941 value = kvm_get_cr8(vcpu);
2942 break;
2943 default:
2944 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2945 return 0;
2946 }
2947 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
2948 (u32)((u64)value >> 32), handler);
2949
2950 return value;
2951}
2952
2953void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2954 unsigned long *rflags)
2955{
2956 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
2957 (u32)((u64)val >> 32), handler);
2958
2959 switch (cr) {
2960 case 0:
2961 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
2962 *rflags = kvm_x86_ops->get_rflags(vcpu);
2963 break;
2964 case 2:
2965 vcpu->arch.cr2 = val;
2966 break;
2967 case 3:
2968 kvm_set_cr3(vcpu, val);
2969 break;
2970 case 4:
2971 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
2972 break;
2973 case 8:
2974 kvm_set_cr8(vcpu, val & 0xfUL);
2975 break;
2976 default:
2977 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2978 }
2979}
2980
2981static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2982{
2983 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2984 int j, nent = vcpu->arch.cpuid_nent;
2985
2986 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2987 /* when no next entry is found, the current entry[i] is reselected */
2988 for (j = i + 1; ; j = (j + 1) % nent) {
2989 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
2990 if (ej->function == e->function) {
2991 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2992 return j;
2993 }
2994 }
2995 return 0; /* silence gcc, even though control never reaches here */
2996}
2997
2998/* find an entry with matching function, matching index (if needed), and that
2999 * should be read next (if it's stateful) */
3000static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
3001 u32 function, u32 index)
3002{
3003 if (e->function != function)
3004 return 0;
3005 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
3006 return 0;
3007 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
3008 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
3009 return 0;
3010 return 1;
3011}
3012
3013struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3014 u32 function, u32 index)
3015{
3016 int i;
3017 struct kvm_cpuid_entry2 *best = NULL;
3018
3019 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
3020 struct kvm_cpuid_entry2 *e;
3021
3022 e = &vcpu->arch.cpuid_entries[i];
3023 if (is_matching_cpuid_entry(e, function, index)) {
3024 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3025 move_to_next_stateful_cpuid_entry(vcpu, i);
3026 best = e;
3027 break;
3028 }
3029 /*
3030 * Both basic or both extended?
3031 */
3032 if (((e->function ^ function) & 0x80000000) == 0)
3033 if (!best || e->function > best->function)
3034 best = e;
3035 }
3036 return best;
3037}
3038
3039int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3040{
3041 struct kvm_cpuid_entry2 *best;
3042
3043 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3044 if (best)
3045 return best->eax & 0xff;
3046 return 36;
3047}
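/*
 * Example (illustrative): CPUID leaf 0x80000008 reports MAXPHYADDR in
 * EAX[7:0], e.g. eax = 0x3028 means 0x28 = 40 physical address bits;
 * when the leaf is absent the legacy default of 36 bits is returned.
 */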
3048
3049void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3050{
3051 u32 function, index;
3052 struct kvm_cpuid_entry2 *best;
3053
3054 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3055 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3056 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3057 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3058 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3059 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3060 best = kvm_find_cpuid_entry(vcpu, function, index);
3061 if (best) {
3062 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3063 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3064 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3065 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
3066 }
3067 kvm_x86_ops->skip_emulated_instruction(vcpu);
3068 KVMTRACE_5D(CPUID, vcpu, function,
3069 (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
3070 (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
3071 (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
3072 (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
3073}
3074EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
3075
3076/*
3077 * Check if userspace requested an interrupt window, and that the
3078 * interrupt window is open.
3079 *
3080 * No need to exit to userspace if we already have an interrupt queued.
3081 */
3082static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
3083 struct kvm_run *kvm_run)
3084{
3085 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
3086 kvm_run->request_interrupt_window &&
3087 kvm_arch_interrupt_allowed(vcpu));
3088}
3089
3090static void post_kvm_run_save(struct kvm_vcpu *vcpu,
3091 struct kvm_run *kvm_run)
3092{
3093 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
3094 kvm_run->cr8 = kvm_get_cr8(vcpu);
3095 kvm_run->apic_base = kvm_get_apic_base(vcpu);
3096 if (irqchip_in_kernel(vcpu->kvm))
3097 kvm_run->ready_for_interrupt_injection = 1;
3098 else
3099 kvm_run->ready_for_interrupt_injection =
3100 (kvm_arch_interrupt_allowed(vcpu) &&
3101 !kvm_cpu_has_interrupt(vcpu));
3102}
3103
3104static void vapic_enter(struct kvm_vcpu *vcpu)
3105{
3106 struct kvm_lapic *apic = vcpu->arch.apic;
3107 struct page *page;
3108
3109 if (!apic || !apic->vapic_addr)
3110 return;
3111
3112 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3113
3114 vcpu->arch.apic->vapic_page = page;
3115}
3116
3117static void vapic_exit(struct kvm_vcpu *vcpu)
3118{
3119 struct kvm_lapic *apic = vcpu->arch.apic;
3120
3121 if (!apic || !apic->vapic_addr)
3122 return;
3123
3124 down_read(&vcpu->kvm->slots_lock);
3125 kvm_release_page_dirty(apic->vapic_page);
3126 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3127 up_read(&vcpu->kvm->slots_lock);
3128}
3129
3130static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3131{
3132 int max_irr, tpr;
3133
3134 if (!kvm_x86_ops->update_cr8_intercept)
3135 return;
3136
3137 max_irr = kvm_lapic_find_highest_irr(vcpu);
3138
3139 if (max_irr != -1)
3140 max_irr >>= 4;
3141
3142 tpr = kvm_lapic_get_cr8(vcpu);
3143
3144 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3145}
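/*
 * Note on the shift above (illustrative): pending vectors are 8 bits
 * wide and the TPR threshold only compares their high nibble, the
 * priority class; max_irr = 0x51 therefore becomes class 5 before being
 * handed to the vendor intercept hook alongside the current tpr.
 */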
3146
3147static void inject_irq(struct kvm_vcpu *vcpu)
3148{
3149 /* try to reinject previous events if any */
3150 if (vcpu->arch.nmi_injected) {
3151 kvm_x86_ops->set_nmi(vcpu);
3152 return;
3153 }
3154
3155 if (vcpu->arch.interrupt.pending) {
3156 kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
3157 return;
3158 }
3159
3160 /* try to inject new event if pending */
3161 if (vcpu->arch.nmi_pending) {
3162 if (kvm_x86_ops->nmi_allowed(vcpu)) {
3163 vcpu->arch.nmi_pending = false;
3164 vcpu->arch.nmi_injected = true;
3165 kvm_x86_ops->set_nmi(vcpu);
3166 }
3167 } else if (kvm_cpu_has_interrupt(vcpu)) {
3168 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
3169 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
3170 kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
3171 }
3172 }
3173}
3174
3175static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3176{
3177 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
3178 kvm_run->request_interrupt_window;
3179
3180 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3181 kvm_x86_ops->drop_interrupt_shadow(vcpu);
3182
3183 inject_irq(vcpu);
3184
3185 /* enable NMI/IRQ window open exits if needed */
3186 if (vcpu->arch.nmi_pending)
3187 kvm_x86_ops->enable_nmi_window(vcpu);
3188 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
3189 kvm_x86_ops->enable_irq_window(vcpu);
3190}
3191
3192static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3193{
3194 int r;
3195
3196 if (vcpu->requests)
3197 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3198 kvm_mmu_unload(vcpu);
3199
3200 r = kvm_mmu_reload(vcpu);
3201 if (unlikely(r))
3202 goto out;
3203
3204 if (vcpu->requests) {
3205 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
3206 __kvm_migrate_timers(vcpu);
3207 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3208 kvm_write_guest_time(vcpu);
3209 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3210 kvm_mmu_sync_roots(vcpu);
3211 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3212 kvm_x86_ops->tlb_flush(vcpu);
3213 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3214 &vcpu->requests)) {
3215 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
3216 r = 0;
3217 goto out;
3218 }
3219 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3220 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
3221 r = 0;
3222 goto out;
3223 }
3224 }
3225
3226 preempt_disable();
3227
3228 kvm_x86_ops->prepare_guest_switch(vcpu);
3229 kvm_load_guest_fpu(vcpu);
3230
3231 local_irq_disable();
3232
3233 if (vcpu->requests || need_resched() || signal_pending(current)) {
3234 local_irq_enable();
3235 preempt_enable();
3236 r = 1;
3237 goto out;
3238 }
3239
3240 vcpu->guest_mode = 1;
3241 /*
3242 * Make sure that guest_mode assignment won't happen after
3243 * testing the pending IRQ vector bitmap.
3244 */
3245 smp_wmb();
3246
3247 if (vcpu->arch.exception.pending)
3248 __queue_exception(vcpu);
3249 else
3250 inject_pending_irq(vcpu, kvm_run);
3251
3252 if (kvm_lapic_enabled(vcpu)) {
3253 if (!vcpu->arch.apic->vapic_addr)
3254 update_cr8_intercept(vcpu);
3255 else
3256 kvm_lapic_sync_to_vapic(vcpu);
3257 }
3258
3259 up_read(&vcpu->kvm->slots_lock);
3260
3261 kvm_guest_enter();
3262
3263 get_debugreg(vcpu->arch.host_dr6, 6);
3264 get_debugreg(vcpu->arch.host_dr7, 7);
3265 if (unlikely(vcpu->arch.switch_db_regs)) {
3266 get_debugreg(vcpu->arch.host_db[0], 0);
3267 get_debugreg(vcpu->arch.host_db[1], 1);
3268 get_debugreg(vcpu->arch.host_db[2], 2);
3269 get_debugreg(vcpu->arch.host_db[3], 3);
3270
3271 set_debugreg(0, 7);
3272 set_debugreg(vcpu->arch.eff_db[0], 0);
3273 set_debugreg(vcpu->arch.eff_db[1], 1);
3274 set_debugreg(vcpu->arch.eff_db[2], 2);
3275 set_debugreg(vcpu->arch.eff_db[3], 3);
3276 }
3277
3278 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
3279 kvm_x86_ops->run(vcpu, kvm_run);
3280
3281 if (unlikely(vcpu->arch.switch_db_regs)) {
3282 set_debugreg(0, 7);
3283 set_debugreg(vcpu->arch.host_db[0], 0);
3284 set_debugreg(vcpu->arch.host_db[1], 1);
3285 set_debugreg(vcpu->arch.host_db[2], 2);
3286 set_debugreg(vcpu->arch.host_db[3], 3);
3287 }
3288 set_debugreg(vcpu->arch.host_dr6, 6);
3289 set_debugreg(vcpu->arch.host_dr7, 7);
3290
3291 vcpu->guest_mode = 0;
3292 local_irq_enable();
3293
3294 ++vcpu->stat.exits;
3295
3296 /*
3297 * We must have an instruction between local_irq_enable() and
3298 * kvm_guest_exit(), so the timer interrupt isn't delayed by
3299 * the interrupt shadow. The stat.exits increment will do nicely.
3300 * But we need to prevent reordering, hence this barrier():
3301 */
3302 barrier();
3303
3304 kvm_guest_exit();
3305
3306 preempt_enable();
3307
3308 down_read(&vcpu->kvm->slots_lock);
3309
3310 /*
3311 * Profile KVM exit RIPs:
3312 */
3313 if (unlikely(prof_on == KVM_PROFILING)) {
3314 unsigned long rip = kvm_rip_read(vcpu);
3315 profile_hit(KVM_PROFILING, (void *)rip);
3316 }
3317
3318
3319 kvm_lapic_sync_from_vapic(vcpu);
3320
3321 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
3322out:
3323 return r;
3324}
3325
3326
3327static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3328{
3329 int r;
3330
3331 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
3332 pr_debug("vcpu %d received sipi with vector # %x\n",
3333 vcpu->vcpu_id, vcpu->arch.sipi_vector);
3334 kvm_lapic_reset(vcpu);
3335 r = kvm_arch_vcpu_reset(vcpu);
3336 if (r)
3337 return r;
3338 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3339 }
3340
3341 down_read(&vcpu->kvm->slots_lock);
3342 vapic_enter(vcpu);
3343
3344 r = 1;
3345 while (r > 0) {
3346 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
3347 r = vcpu_enter_guest(vcpu, kvm_run);
3348 else {
3349 up_read(&vcpu->kvm->slots_lock);
3350 kvm_vcpu_block(vcpu);
3351 down_read(&vcpu->kvm->slots_lock);
3352 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
3353 {
3354 switch(vcpu->arch.mp_state) {
3355 case KVM_MP_STATE_HALTED:
3356 vcpu->arch.mp_state =
3357 KVM_MP_STATE_RUNNABLE;
3358 case KVM_MP_STATE_RUNNABLE:
3359 break;
3360 case KVM_MP_STATE_SIPI_RECEIVED:
3361 default:
3362 r = -EINTR;
3363 break;
3364 }
3365 }
3366 }
3367
3368 if (r <= 0)
3369 break;
3370
3371 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3372 if (kvm_cpu_has_pending_timer(vcpu))
3373 kvm_inject_pending_timer_irqs(vcpu);
3374
3375 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3376 r = -EINTR;
3377 kvm_run->exit_reason = KVM_EXIT_INTR;
3378 ++vcpu->stat.request_irq_exits;
3379 }
3380 if (signal_pending(current)) {
3381 r = -EINTR;
3382 kvm_run->exit_reason = KVM_EXIT_INTR;
3383 ++vcpu->stat.signal_exits;
3384 }
3385 if (need_resched()) {
3386 up_read(&vcpu->kvm->slots_lock);
3387 kvm_resched(vcpu);
3388 down_read(&vcpu->kvm->slots_lock);
3389 }
3390 }
3391
3392 up_read(&vcpu->kvm->slots_lock);
3393 post_kvm_run_save(vcpu, kvm_run);
3394
3395 vapic_exit(vcpu);
3396
3397 return r;
3398}
3399
3400int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3401{
3402 int r;
3403 sigset_t sigsaved;
3404
3405 vcpu_load(vcpu);
3406
3407 if (vcpu->sigset_active)
3408 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3409
3410 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
3411 kvm_vcpu_block(vcpu);
3412 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
3413 r = -EAGAIN;
3414 goto out;
3415 }
3416
3417 /* re-sync apic's tpr */
3418 if (!irqchip_in_kernel(vcpu->kvm))
3419 kvm_set_cr8(vcpu, kvm_run->cr8);
3420
3421 if (vcpu->arch.pio.cur_count) {
3422 r = complete_pio(vcpu);
3423 if (r)
3424 goto out;
3425 }
3426#ifdef CONFIG_HAS_IOMEM
3427 if (vcpu->mmio_needed) {
3428 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3429 vcpu->mmio_read_completed = 1;
3430 vcpu->mmio_needed = 0;
3431
3432 down_read(&vcpu->kvm->slots_lock);
3433 r = emulate_instruction(vcpu, kvm_run,
3434 vcpu->arch.mmio_fault_cr2, 0,
3435 EMULTYPE_NO_DECODE);
3436 up_read(&vcpu->kvm->slots_lock);
3437 if (r == EMULATE_DO_MMIO) {
3438 /*
3439 * Read-modify-write. Back to userspace.
3440 */
3441 r = 0;
3442 goto out;
3443 }
3444 }
3445#endif
3446 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3447 kvm_register_write(vcpu, VCPU_REGS_RAX,
3448 kvm_run->hypercall.ret);
3449
3450 r = __vcpu_run(vcpu, kvm_run);
3451
3452out:
3453 if (vcpu->sigset_active)
3454 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3455
3456 vcpu_put(vcpu);
3457 return r;
3458}
3459
3460int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3461{
3462 vcpu_load(vcpu);
3463
3464 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3465 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3466 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3467 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3468 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3469 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3470 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3471 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3472#ifdef CONFIG_X86_64
3473 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3474 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3475 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3476 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3477 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3478 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3479 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3480 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
3481#endif
3482
3483 regs->rip = kvm_rip_read(vcpu);
3484 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3485
3486 /*
3487 * Don't leak debug flags in case they were set for guest debugging
3488 */
3489 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3490 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3491
3492 vcpu_put(vcpu);
3493
3494 return 0;
3495}
3496
3497int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3498{
3499 vcpu_load(vcpu);
3500
3501 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
3502 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
3503 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
3504 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
3505 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
3506 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
3507 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
3508 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
3509#ifdef CONFIG_X86_64
3510 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
3511 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
3512 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
3513 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
3514 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
3515 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
3516 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
3517 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
3518
3519#endif
3520
3521 kvm_rip_write(vcpu, regs->rip);
3522 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3523
3524
3525 vcpu->arch.exception.pending = false;
3526
3527 vcpu_put(vcpu);
3528
3529 return 0;
3530}
3531
3532void kvm_get_segment(struct kvm_vcpu *vcpu,
3533 struct kvm_segment *var, int seg)
3534{
3535 kvm_x86_ops->get_segment(vcpu, var, seg);
3536}
3537
3538void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3539{
3540 struct kvm_segment cs;
3541
3542 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
3543 *db = cs.db;
3544 *l = cs.l;
3545}
3546EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3547
3548int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3549 struct kvm_sregs *sregs)
3550{
3551 struct descriptor_table dt;
3552
3553 vcpu_load(vcpu);
3554
3555 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3556 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3557 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3558 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3559 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3560 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3561
3562 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3563 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3564
3565 kvm_x86_ops->get_idt(vcpu, &dt);
3566 sregs->idt.limit = dt.limit;
3567 sregs->idt.base = dt.base;
3568 kvm_x86_ops->get_gdt(vcpu, &dt);
3569 sregs->gdt.limit = dt.limit;
3570 sregs->gdt.base = dt.base;
3571
3572 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3573 sregs->cr0 = vcpu->arch.cr0;
3574 sregs->cr2 = vcpu->arch.cr2;
3575 sregs->cr3 = vcpu->arch.cr3;
3576 sregs->cr4 = vcpu->arch.cr4;
3577 sregs->cr8 = kvm_get_cr8(vcpu);
3578 sregs->efer = vcpu->arch.shadow_efer;
3579 sregs->apic_base = kvm_get_apic_base(vcpu);
3580
3581 if (irqchip_in_kernel(vcpu->kvm))
3582 memset(sregs->interrupt_bitmap, 0,
3583 sizeof sregs->interrupt_bitmap);
3584 else
3585 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
3586 sizeof sregs->interrupt_bitmap);
3587
3588 if (vcpu->arch.interrupt.pending)
3589 set_bit(vcpu->arch.interrupt.nr,
3590 (unsigned long *)sregs->interrupt_bitmap);
3591
3592 vcpu_put(vcpu);
3593
3594 return 0;
3595}
3596
3597int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3598 struct kvm_mp_state *mp_state)
3599{
3600 vcpu_load(vcpu);
3601 mp_state->mp_state = vcpu->arch.mp_state;
3602 vcpu_put(vcpu);
3603 return 0;
3604}
3605
3606int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3607 struct kvm_mp_state *mp_state)
3608{
3609 vcpu_load(vcpu);
3610 vcpu->arch.mp_state = mp_state->mp_state;
3611 vcpu_put(vcpu);
3612 return 0;
3613}
3614
3615static void kvm_set_segment(struct kvm_vcpu *vcpu,
3616 struct kvm_segment *var, int seg)
3617{
3618 kvm_x86_ops->set_segment(vcpu, var, seg);
3619}
3620
3621static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3622 struct kvm_segment *kvm_desct)
3623{
3624 kvm_desct->base = seg_desc->base0;
3625 kvm_desct->base |= seg_desc->base1 << 16;
3626 kvm_desct->base |= seg_desc->base2 << 24;
3627 kvm_desct->limit = seg_desc->limit0;
3628 kvm_desct->limit |= seg_desc->limit << 16;
3629 if (seg_desc->g) {
3630 kvm_desct->limit <<= 12;
3631 kvm_desct->limit |= 0xfff;
3632 }
3633 kvm_desct->selector = selector;
3634 kvm_desct->type = seg_desc->type;
3635 kvm_desct->present = seg_desc->p;
3636 kvm_desct->dpl = seg_desc->dpl;
3637 kvm_desct->db = seg_desc->d;
3638 kvm_desct->s = seg_desc->s;
3639 kvm_desct->l = seg_desc->l;
3640 kvm_desct->g = seg_desc->g;
3641 kvm_desct->avl = seg_desc->avl;
3642 if (!selector)
3643 kvm_desct->unusable = 1;
3644 else
3645 kvm_desct->unusable = 0;
3646 kvm_desct->padding = 0;
3647}
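/*
 * Worked example of the limit handling above (hypothetical descriptor):
 * limit0 = 0xffff and limit = 0xf form a 20-bit limit of 0xfffff; with
 * the granularity bit set it is scaled to (0xfffff << 12) | 0xfff =
 * 0xffffffff, i.e. a flat 4 GiB segment.
 */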
3648
3649static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3650 u16 selector,
3651 struct descriptor_table *dtable)
3652{
3653 if (selector & 1 << 2) {
3654 struct kvm_segment kvm_seg;
3655
3656 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
3657
3658 if (kvm_seg.unusable)
3659 dtable->limit = 0;
3660 else
3661 dtable->limit = kvm_seg.limit;
3662 dtable->base = kvm_seg.base;
3663 }
3664 else
3665 kvm_x86_ops->get_gdt(vcpu, dtable);
3666}
3667
3668/* allowed just for 8 bytes segments */
3669static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3670 struct desc_struct *seg_desc)
3671{
3672 gpa_t gpa;
3673 struct descriptor_table dtable;
3674 u16 index = selector >> 3;
3675
3676 get_segment_descriptor_dtable(vcpu, selector, &dtable);
3677
3678 if (dtable.limit < index * 8 + 7) {
3679 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3680 return 1;
3681 }
3682 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3683 gpa += index * 8;
3684 return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
3685}
3686
3687/* allowed just for 8 bytes segments */
3688static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3689 struct desc_struct *seg_desc)
3690{
3691 gpa_t gpa;
3692 struct descriptor_table dtable;
3693 u16 index = selector >> 3;
3694
3695 get_segment_descriptor_dtable(vcpu, selector, &dtable);
3696
3697 if (dtable.limit < index * 8 + 7)
3698 return 1;
3699 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3700 gpa += index * 8;
3701 return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
3702}
3703
3704static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3705 struct desc_struct *seg_desc)
3706{
3707 u32 base_addr;
3708
3709 base_addr = seg_desc->base0;
3710 base_addr |= (seg_desc->base1 << 16);
3711 base_addr |= (seg_desc->base2 << 24);
3712
3713 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
3714}
3715
3716static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3717{
3718 struct kvm_segment kvm_seg;
3719
3720 kvm_get_segment(vcpu, &kvm_seg, seg);
3721 return kvm_seg.selector;
3722}
3723
3724static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3725 u16 selector,
3726 struct kvm_segment *kvm_seg)
3727{
3728 struct desc_struct seg_desc;
3729
3730 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3731 return 1;
3732 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3733 return 0;
3734}
3735
3736static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
3737{
3738 struct kvm_segment segvar = {
3739 .base = selector << 4,
3740 .limit = 0xffff,
3741 .selector = selector,
3742 .type = 3,
3743 .present = 1,
3744 .dpl = 3,
3745 .db = 0,
3746 .s = 1,
3747 .l = 0,
3748 .g = 0,
3749 .avl = 0,
3750 .unusable = 0,
3751 };
3752 kvm_x86_ops->set_segment(vcpu, &segvar, seg);
3753 return 0;
3754}
3755
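/*
 * Load a segment register from a selector: as a flat real-mode segment
 * when CR0.PE is clear, otherwise from the guest's descriptor tables.
 * Returns 0 on success and 1 if the descriptor could not be loaded.
 */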
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg)
{
	struct kvm_segment kvm_seg;

	if (!(vcpu->arch.cr0 & X86_CR0_PE))
		return kvm_load_realmode_segment(vcpu, selector, seg);
	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
		return 1;
	kvm_seg.type |= type_bits;

	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
	    seg != VCPU_SREG_LDTR)
		if (!kvm_seg.s)
			kvm_seg.unusable = 1;

	kvm_set_segment(vcpu, &kvm_seg, seg);
	return 0;
}

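/*
 * Mirror what hardware does on a task switch: snapshot the current
 * registers and segment selectors into a 32-bit TSS image.
 */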
static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = kvm_rip_read(vcpu);
	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}

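/*
 * Restore register and segment state from a 32-bit TSS image. Bit 1 of
 * EFLAGS is architecturally always set, hence the "| 2". Returns 1 if
 * any segment reload fails.
 */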
static int load_state_from_tss32(struct kvm_vcpu *vcpu,
				 struct tss_segment_32 *tss)
{
	kvm_set_cr3(vcpu, tss->cr3);

	kvm_rip_write(vcpu, tss->eip);
	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
		return 1;
	return 0;
}

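/* 16-bit (80286-style TSS) counterpart of save_state_to_tss32(). */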
static void save_state_to_tss16(struct kvm_vcpu *vcpu,
				struct tss_segment_16 *tss)
{
	tss->ip = kvm_rip_read(vcpu);
	tss->flag = kvm_x86_ops->get_rflags(vcpu);
	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

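/* 16-bit counterpart of load_state_from_tss32(). */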
static int load_state_from_tss16(struct kvm_vcpu *vcpu,
				 struct tss_segment_16 *tss)
{
	kvm_rip_write(vcpu, tss->ip);
	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;
	return 0;
}

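/*
 * Memory half of a task switch through a 16-bit TSS: save the outgoing
 * state into the old TSS, load the incoming state from the new one,
 * and chain prev_task_link when the switch nests (old_tss_sel !=
 * 0xffff). Returns 1 on success, 0 if a guest memory access failed.
 */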
static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_16 tss_segment_16;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			   sizeof tss_segment_16))
		goto out;

	save_state_to_tss16(vcpu, &tss_segment_16);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			    sizeof tss_segment_16))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_16, sizeof tss_segment_16))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_16.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_16.prev_task_link,
				    sizeof tss_segment_16.prev_task_link))
			goto out;
	}

	if (load_state_from_tss16(vcpu, &tss_segment_16))
		goto out;

	ret = 1;
out:
	return ret;
}

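/* 32-bit variant of kvm_task_switch_16(). */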
static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_32 tss_segment_32;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			   sizeof tss_segment_32))
		goto out;

	save_state_to_tss32(vcpu, &tss_segment_32);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			    sizeof tss_segment_32))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_32, sizeof tss_segment_32))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_32.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_32.prev_task_link,
				    sizeof tss_segment_32.prev_task_link))
			goto out;
	}

	if (load_state_from_tss32(vcpu, &tss_segment_32))
		goto out;

	ret = 1;
out:
	return ret;
}

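/*
 * Emulate a hardware task switch, invoked by the vendor exit handlers
 * with the decoded switch reason (CALL, JMP, IRET or task gate):
 * validate the target TSS descriptor, maintain the busy bits and
 * EFLAGS.NT as the switch type requires, then defer to the 16/32-bit
 * TSS helpers above.
 */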
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
	struct kvm_segment tr_seg;
	struct desc_struct cseg_desc;
	struct desc_struct nseg_desc;
	int ret = 0;
	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

	/* FIXME: Handle errors. Failure to read either TSS or their
	 * descriptors should generate a page fault.
	 */
	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
		goto out;

	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
		goto out;

	if (reason != TASK_SWITCH_IRET) {
		int cpl;

		cpl = kvm_x86_ops->get_cpl(vcpu);
		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
	}

	if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
		return 1;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
	}

	if (reason == TASK_SWITCH_IRET) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
	}

	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (nseg_desc.type & 8)
		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);
	else
		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
	}

	if (reason != TASK_SWITCH_IRET) {
		nseg_desc.type |= (1 << 1); /* mark the new task busy */
		save_guest_segment_descriptor(vcpu, tss_selector,
					      &nseg_desc);
	}

	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
	tr_seg.type = 11;
	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

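/*
 * Install a full set of special registers supplied by userspace:
 * descriptor tables, control registers, EFER, APIC base, segments and
 * pending interrupt state, resetting the MMU context if paging state
 * changed.
 */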
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i, pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;

	down_read(&vcpu->kvm->slots_lock);
	if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
		vcpu->arch.cr3 = sregs->cr3;
	else
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
	up_read(&vcpu->kvm->slots_lock);

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->arch.cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
		       sizeof vcpu->arch.irq_pending);
		vcpu->arch.irq_summary = 0;
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
			if (vcpu->arch.irq_pending[i])
				__set_bit(i, &vcpu->arch.irq_summary);
	} else {
		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
		pending_vec = find_first_bit(
			(const unsigned long *)sregs->interrupt_bitmap,
			max_bits);
		/* Only a pending external irq is handled here */
		if (pending_vec < max_bits) {
			kvm_queue_interrupt(vcpu, pending_vec);
			pr_debug("Set back pending irq %d\n", pending_vec);
		}
		kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !(vcpu->arch.cr0 & X86_CR0_PE))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	vcpu_put(vcpu);

	return 0;
}

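/*
 * Configure guest debugging from userspace: choose between the guest's
 * own debug registers and the hardware breakpoints supplied by the
 * debugger, then let the vendor module program the intercepts.
 */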
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int i, r;

	vcpu_load(vcpu);

	if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	if (dbg->control & KVM_GUESTDBG_INJECT_DB)
		kvm_queue_exception(vcpu, DB_VECTOR);
	else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
		kvm_queue_exception(vcpu, BP_VECTOR);

	vcpu_put(vcpu);

	return r;
}

/*
 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
	up_read(&vcpu->kvm->slots_lock);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}

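/*
 * For reference, userspace drives the translation above through the
 * KVM_TRANSLATE vcpu ioctl; a minimal sketch, assuming a hypothetical
 * vcpu_fd and omitting error handling:
 *
 *	struct kvm_translation tr = { .linear_address = gva };
 *	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
 *		printf("gpa = 0x%llx\n",
 *		       (unsigned long long)tr.physical_address);
 */
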
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

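/*
 * Give a new vcpu a pristine FPU image: a freshly finit-ed state with
 * MXCSR at its architectural reset value of 0x1f80.
 */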
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/*
	 * Touch the fpu the first time in non-atomic context: if this
	 * is the first fpu instruction, the exception handler will fire
	 * before the instruction returns and will have to allocate RAM
	 * with GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_finit();
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

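/*
 * Swap in the guest FPU state before entering the guest; the matching
 * kvm_put_guest_fpu() below swaps the host state back and accounts the
 * reload in the fpu_reload statistic.
 */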
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

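/* Clear pending NMI and debug register state before the vendor reset. */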
int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
	kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

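/*
 * First-stage vcpu construction: choose the initial mp_state (only the
 * boot processor starts RUNNABLE when the irqchip is in the kernel),
 * allocate the pio scratch page, and create the MMU and, if needed,
 * the in-kernel local APIC.
 */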
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	kvm_mmu_destroy(vcpu);
	up_read(&vcpu->kvm->slots_lock);
	free_page((unsigned long)vcpu->arch.pio_data);
}

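/*
 * Allocate a VM and set up its x86 bits: the MMU page and assigned
 * device lists, the reserved userspace irq source, and the TSC value
 * sampled at VM creation.
 */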
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(kvm);
}

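/*
 * Arch hook for memory slot changes: mmap/munmap backing storage for
 * kernel-allocated slots (the legacy !user_alloc path), rebalance the
 * shadow page budget, and drop write access and stale TLB entries for
 * the slot.
 */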
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

	/* To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			/* set userspace_addr atomically for kvm_hva_to_rmapp */
			spin_lock(&kvm->mmu_lock);
			memslot->userspace_addr = userspace_addr;
			spin_unlock(&kvm->mmu_lock);
		} else {
			if (!old.user_alloc && old.rmap) {
				int ret;

				down_write(&current->mm->mmap_sem);
				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				up_write(&current->mm->mmap_sem);
				if (ret < 0)
					printk(KERN_WARNING
					       "kvm_vm_ioctl_set_memory_region: "
					       "failed to munmap memory\n");
			}
		}
	}

	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
}

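/*
 * A vcpu is runnable when it is in the RUNNABLE or SIPI_RECEIVED
 * mp_state, or has an NMI awaiting injection.
 */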
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending;
}

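/*
 * The IPI handler has nothing to do: the interrupt itself is enough to
 * force the target cpu out of guest mode.
 */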
static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}
	/*
	 * We may be called synchronously with irqs disabled in guest mode,
	 * so there is no need to call smp_call_function_single() in that
	 * case.
	 */
	cpu = get_cpu();
	if (vcpu->guest_mode && vcpu->cpu != cpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}