arch/x86/kvm/x86.c (mirror_ubuntu-bionic-kernel.git, at "KVM: Break dependency between vcpu index in vcpus array and vcpu_id.")

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

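/*
 * Note on the masks above: ~0xfffffffffffffafeULL == 0x501, i.e. only SCE
 * (bit 0), LME (bit 8) and LMA (bit 10) start out writable; on 32-bit
 * hosts only SCE does.  Vendor modules may clear further reserved bits
 * (e.g. EFER_NX) at load time via kvm_enable_efer_bits() below.
 */
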
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

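/*
 * Note on segment_base(): bits 0-1 of a selector are the RPL and bit 2 is
 * the table indicator (set for the LDT), so "selector & ~7" is the byte
 * offset of the 8-byte descriptor within its table.  The 32-bit base is
 * scattered over base0 (bits 0-15), base1 (16-23) and base2 (24-31);
 * 64-bit system descriptors (LDT type 2, TSS types 9/11) carry an extra
 * base3 dword for bits 32-63, hence the CONFIG_X86_64 special case above.
 */
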
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;

	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

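/*
 * Note on the offset math in load_pdptrs(): in PAE mode cr3 points at a
 * 32-byte-aligned table of four 8-byte PDPTEs, so "((cr3 & 4095) >> 5) << 2"
 * counts in u64 units.  For example, cr3 ending in 0xe0 gives offset 28,
 * and 28 * sizeof(u64) == 224 == 0xe0, the byte offset of the PDPT within
 * its page.
 */
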
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;

			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}

	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = vcpu->arch.cr4;
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);


/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

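/*
 * The version field acts as a seqlock: it is bumped to an odd value before
 * the structure is rewritten and to an even value afterwards.  A guest
 * reader follows roughly this pattern (illustrative sketch, not code from
 * this file):
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((wc->version & 1) || (wc->version != version));
 */
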
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

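/*
 * Worked example for the scaling above: on a 2 GHz host, tsc_khz = 2000000
 * and tps64 = 2e9.  Neither loop body runs (2e9 is not > 2 * 1e9, and
 * 2e9 > 1e9), so tsc_shift = 0 and tsc_to_system_mul =
 * div_frac(1e9, 2e9) = 2^31.  The guest then computes
 * ns = (tsc * 2^31) >> 32 = tsc / 2, i.e. 0.5 ns per cycle.
 */
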
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if ((!vcpu->time_page))
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	return valid_mtrr_type(data & 0xff);
}

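/*
 * The "(1 << t) & 0xf3" test above is a bitmap lookup: 0xf3 has bits 0, 1,
 * 4, 5, 6 and 7 set, the architecturally defined PAT memory types (UC, WC,
 * WT, WP, WB, UC-).  0x73 drops bit 7, since type 7 is reserved for MTRRs.
 */
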
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

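/*
 * Variable-range MTRR MSRs come in base/mask pairs starting at 0x200:
 * MTRRphysBase0 = 0x200, MTRRphysMask0 = 0x201, MTRRphysBase1 = 0x202, ...
 * For msr = 0x203 the index math above yields idx = 1, is_mtrr_mask = 1,
 * selecting var_ranges[1].mask_lo.
 */
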
static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL */
			if ((offset & 0x3) == 0 &&
			    data != 0 && data != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

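/*
 * Each machine-check bank owns four consecutive MSRs starting at
 * MSR_IA32_MC0_CTL, in the order CTL, STATUS, ADDR, MISC; mce_banks[]
 * mirrors that flat four-per-bank layout, which is why "(offset & 0x3) == 0"
 * above identifies writes to a bank's CTL register.
 */
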
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return set_msr_mce(vcpu, msr, data);
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


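/*
 * MSR_KVM_SYSTEM_TIME, handled above, packs two things into one value: the
 * guest writes a guest physical address with bit 0 as the enable flag, so
 * "data & ~(PAGE_MASK | 1)" recovers the offset within the page and
 * "data >> PAGE_SHIFT" the frame holding the pvclock_vcpu_time_info.
 */
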
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
		data = 0;
		break;
	case MSR_IA32_MCG_CAP:
		data = vcpu->arch.mcg_cap;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			data = vcpu->arch.mce_banks[offset];
			break;
		}
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_K8_SYSCFG:
	case MSR_K7_HWCR:
	case MSR_VM_HSAVE_PA:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return get_msr_mce(vcpu, msr, pdata);
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs.  All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs.  Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

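/*
 * Userspace reaches msr_io() through the KVM_GET_MSRS/KVM_SET_MSRS vcpu
 * ioctls, passing a struct kvm_msrs header followed by nmsrs
 * struct kvm_msr_entry records.  Roughly (illustrative sketch, not code
 * from this file):
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} req = { .hdr.nmsrs = 1, .entries[0].index = MSR_EFER };
 *
 *	ioctl(vcpu_fd, KVM_GET_MSRS, &req);
 *	// on success, req.entries[0].data now holds the guest's EFER
 */
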
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_ASSIGN_DEV_IRQ:
	case KVM_CAP_IRQFD:
	case KVM_CAP_PIT2:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	case KVM_CAP_MCE:
		r = KVM_MAX_MCE_BANKS;
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < msr_list.nmsrs)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
		u64 mce_cap;

		mce_cap = KVM_MCE_CAP_SUPPORTED;
		r = -EFAULT;
		if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

#define F(x) bit(X86_FEATURE_##x)

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_lm = F(LM);
#else
	unsigned f_lm = 0;
#endif

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | 0 /* GBPAGES */ | 0 /* RDTSCP */ |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
		0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | 0 /* x2APIC */ | F(MOVBE) | F(POPCNT) |
		0 /* Reserved, XSAVE, OSXSAVE */;
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
		0 /* SKINIT */ | 0 /* WDT */;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word4_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

#undef F

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	kvm_queue_interrupt(vcpu, irq->irq, false);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_inject_nmi(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
					u64 mcg_cap)
{
	int r;
	unsigned bank_num = mcg_cap & 0xff, bank;

	r = -EINVAL;
	if (!bank_num)
		goto out;
	if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
		goto out;
	r = 0;
	vcpu->arch.mcg_cap = mcg_cap;
	/* Init IA32_MCG_CTL to all 1s */
	if (mcg_cap & MCG_CTL_P)
		vcpu->arch.mcg_ctl = ~(u64)0;
	/* Init IA32_MCi_CTL to all 1s */
	for (bank = 0; bank < bank_num; bank++)
		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
out:
	return r;
}

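/*
 * mcg_cap layout as validated above: bits 0-7 hold the bank count and
 * KVM_MCE_CAP_SUPPORTED contributes MCG_CTL_P; the 0xff0000 portion
 * (bits 16-23, the extended-register count field) is also tolerated.
 * Banks occupy four u64 slots each in mce_banks[], so bank*4 addresses
 * the CTL slot of each bank.
 */
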
static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
				      struct kvm_x86_mce *mce)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u64 *banks = vcpu->arch.mce_banks;

	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
		return -EINVAL;
	/*
	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
	 * reporting is disabled
	 */
	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
	    vcpu->arch.mcg_ctl != ~(u64)0)
		return 0;
	banks += 4 * mce->bank;
	/*
	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
	 * reporting is disabled for the bank
	 */
	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
		return 0;
	if (mce->status & MCI_STATUS_UC) {
		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
		    !(vcpu->arch.cr4 & X86_CR4_MCE)) {
			printk(KERN_DEBUG "kvm: set_mce: "
			       "injects mce exception while "
			       "previous one is in progress!\n");
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
			return 0;
		}
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		vcpu->arch.mcg_status = mce->mcg_status;
		banks[1] = mce->status;
		kvm_queue_exception(vcpu, MC_VECTOR);
	} else if (!(banks[1] & MCI_STATUS_VAL)
		   || !(banks[1] & MCI_STATUS_UC)) {
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		banks[1] = mce->status;
	} else
		banks[1] |= MCI_STATUS_OVER;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_lapic_state *lapic = NULL;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = -EFAULT;
		if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	};
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	case KVM_X86_SETUP_MCE: {
		u64 mcg_cap;

		r = -EFAULT;
		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
			goto out;
		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
		break;
	}
	case KVM_X86_SET_MCE: {
		struct kvm_x86_mce mce;

		r = -EFAULT;
		if (copy_from_user(&mce, argp, sizeof mce))
			goto out;
		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	kfree(lapic);
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

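/*
 * The bound in kvm_vm_ioctl_set_tss_addr() keeps room for the three pages
 * of the real-mode TSS below the 4 GiB boundary:
 * (unsigned int)(-3 * PAGE_SIZE) == 0xffffd000, so an accepted base leaves
 * 3 * 4 KiB addressable before the 32-bit address space wraps.
 */
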
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	spin_unlock(&kvm->mmu_lock);
	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

1fe779f8
CO
1870/*
1871 * Set a new alias region. Aliases map a portion of physical memory into
1872 * another portion. This is useful for memory windows, for example the PC
1873 * VGA region.
1874 */
1875static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1876 struct kvm_memory_alias *alias)
1877{
1878 int r, n;
1879 struct kvm_mem_alias *p;
1880
1881 r = -EINVAL;
1882 /* General sanity checks */
1883 if (alias->memory_size & (PAGE_SIZE - 1))
1884 goto out;
1885 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
1886 goto out;
1887 if (alias->slot >= KVM_ALIAS_SLOTS)
1888 goto out;
1889 if (alias->guest_phys_addr + alias->memory_size
1890 < alias->guest_phys_addr)
1891 goto out;
1892 if (alias->target_phys_addr + alias->memory_size
1893 < alias->target_phys_addr)
1894 goto out;
1895
72dc67a6 1896 down_write(&kvm->slots_lock);
a1708ce8 1897 spin_lock(&kvm->mmu_lock);
1fe779f8 1898
d69fb81f 1899 p = &kvm->arch.aliases[alias->slot];
1fe779f8
CO
1900 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
1901 p->npages = alias->memory_size >> PAGE_SHIFT;
1902 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
1903
1904 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
d69fb81f 1905 if (kvm->arch.aliases[n - 1].npages)
1fe779f8 1906 break;
d69fb81f 1907 kvm->arch.naliases = n;
1fe779f8 1908
a1708ce8 1909 spin_unlock(&kvm->mmu_lock);
1fe779f8
CO
1910 kvm_mmu_zap_all(kvm);
1911
72dc67a6 1912 up_write(&kvm->slots_lock);
1fe779f8
CO
1913
1914 return 0;
1915
1916out:
1917 return r;
1918}
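 /*
  * Note the ordering above: the alias table is rewritten under
  * mmu_lock, and then kvm_mmu_zap_all() drops every shadow page so
  * no translation derived from the old alias layout survives.
  */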
1919
1920static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1921{
1922 int r;
1923
1924 r = 0;
1925 switch (chip->chip_id) {
1926 case KVM_IRQCHIP_PIC_MASTER:
1927 memcpy(&chip->chip.pic,
1928 &pic_irqchip(kvm)->pics[0],
1929 sizeof(struct kvm_pic_state));
1930 break;
1931 case KVM_IRQCHIP_PIC_SLAVE:
1932 memcpy(&chip->chip.pic,
1933 &pic_irqchip(kvm)->pics[1],
1934 sizeof(struct kvm_pic_state));
1935 break;
1936 case KVM_IRQCHIP_IOAPIC:
1937 memcpy(&chip->chip.ioapic,
1938 ioapic_irqchip(kvm),
1939 sizeof(struct kvm_ioapic_state));
1940 break;
1941 default:
1942 r = -EINVAL;
1943 break;
1944 }
1945 return r;
1946}
1947
1948static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1949{
1950 int r;
1951
1952 r = 0;
1953 switch (chip->chip_id) {
1954 case KVM_IRQCHIP_PIC_MASTER:
1955 memcpy(&pic_irqchip(kvm)->pics[0],
1956 &chip->chip.pic,
1957 sizeof(struct kvm_pic_state));
1958 break;
1959 case KVM_IRQCHIP_PIC_SLAVE:
1960 memcpy(&pic_irqchip(kvm)->pics[1],
1961 &chip->chip.pic,
1962 sizeof(struct kvm_pic_state));
1963 break;
1964 case KVM_IRQCHIP_IOAPIC:
1965 memcpy(ioapic_irqchip(kvm),
1966 &chip->chip.ioapic,
1967 sizeof(struct kvm_ioapic_state));
1968 break;
1969 default:
1970 r = -EINVAL;
1971 break;
1972 }
1973 kvm_pic_update_irq(pic_irqchip(kvm));
1974 return r;
1975}
1976
e0f63cb9
SY
1977static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1978{
1979 int r = 0;
1980
1981 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
1982 return r;
1983}
1984
1985static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1986{
1987 int r = 0;
1988
1989 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
1990 kvm_pit_load_count(kvm, 0, ps->channels[0].count);
1991 return r;
1992}
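 /*
  * kvm_pit_load_count() re-arms channel 0 from the restored state so
  * the in-kernel PIT timer matches the counter userspace handed back.
  */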
1993
52d939a0
MT
1994static int kvm_vm_ioctl_reinject(struct kvm *kvm,
1995 struct kvm_reinject_control *control)
1996{
1997 if (!kvm->arch.vpit)
1998 return -ENXIO;
1999 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
2000 return 0;
2001}
2002
5bb064dc
ZX
2003/*
2004 * Get (and clear) the dirty memory log for a memory slot.
2005 */
2006int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2007 struct kvm_dirty_log *log)
2008{
2009 int r;
2010 int n;
2011 struct kvm_memory_slot *memslot;
2012 int is_dirty = 0;
2013
72dc67a6 2014 down_write(&kvm->slots_lock);
5bb064dc
ZX
2015
2016 r = kvm_get_dirty_log(kvm, log, &is_dirty);
2017 if (r)
2018 goto out;
2019
2020 /* If nothing is dirty, don't bother messing with page tables. */
2021 if (is_dirty) {
7c8a83b7 2022 spin_lock(&kvm->mmu_lock);
5bb064dc 2023 kvm_mmu_slot_remove_write_access(kvm, log->slot);
7c8a83b7 2024 spin_unlock(&kvm->mmu_lock);
5bb064dc
ZX
2025 kvm_flush_remote_tlbs(kvm);
2026 memslot = &kvm->memslots[log->slot];
2027 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
2028 memset(memslot->dirty_bitmap, 0, n);
2029 }
2030 r = 0;
2031out:
72dc67a6 2032 up_write(&kvm->slots_lock);
5bb064dc
ZX
2033 return r;
2034}
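 /*
  * Write access is removed from every spte in the slot and the TLBs
  * are flushed before the dirty bitmap is cleared, so the next write
  * to any page in the slot faults and marks the page dirty again.
  */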
2035
1fe779f8
CO
2036long kvm_arch_vm_ioctl(struct file *filp,
2037 unsigned int ioctl, unsigned long arg)
2038{
2039 struct kvm *kvm = filp->private_data;
2040 void __user *argp = (void __user *)arg;
2041 int r = -EINVAL;
f0d66275
DH
2042 /*
2043 * This union makes it completely explicit to gcc-3.x
2044 * that these two variables' stack usage should be
2045 * combined, not added together.
2046 */
2047 union {
2048 struct kvm_pit_state ps;
2049 struct kvm_memory_alias alias;
c5ff41ce 2050 struct kvm_pit_config pit_config;
f0d66275 2051 } u;
1fe779f8
CO
2052
2053 switch (ioctl) {
2054 case KVM_SET_TSS_ADDR:
2055 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2056 if (r < 0)
2057 goto out;
2058 break;
2059 case KVM_SET_MEMORY_REGION: {
2060 struct kvm_memory_region kvm_mem;
2061 struct kvm_userspace_memory_region kvm_userspace_mem;
2062
2063 r = -EFAULT;
2064 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2065 goto out;
2066 kvm_userspace_mem.slot = kvm_mem.slot;
2067 kvm_userspace_mem.flags = kvm_mem.flags;
2068 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
2069 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
2070 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
2071 if (r)
2072 goto out;
2073 break;
2074 }
2075 case KVM_SET_NR_MMU_PAGES:
2076 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2077 if (r)
2078 goto out;
2079 break;
2080 case KVM_GET_NR_MMU_PAGES:
2081 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
2082 break;
f0d66275 2083 case KVM_SET_MEMORY_ALIAS:
1fe779f8 2084 r = -EFAULT;
f0d66275 2085 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
1fe779f8 2086 goto out;
f0d66275 2087 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
1fe779f8
CO
2088 if (r)
2089 goto out;
2090 break;
1fe779f8
CO
2091 case KVM_CREATE_IRQCHIP:
2092 r = -ENOMEM;
d7deeeb0
ZX
2093 kvm->arch.vpic = kvm_create_pic(kvm);
2094 if (kvm->arch.vpic) {
1fe779f8
CO
2095 r = kvm_ioapic_init(kvm);
2096 if (r) {
d7deeeb0
ZX
2097 kfree(kvm->arch.vpic);
2098 kvm->arch.vpic = NULL;
1fe779f8
CO
2099 goto out;
2100 }
2101 } else
2102 goto out;
399ec807
AK
2103 r = kvm_setup_default_irq_routing(kvm);
2104 if (r) {
2105 kfree(kvm->arch.vpic);
2106 kfree(kvm->arch.vioapic);
2107 goto out;
2108 }
1fe779f8 2109 break;
7837699f 2110 case KVM_CREATE_PIT:
c5ff41ce
JK
2111 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
2112 goto create_pit;
2113 case KVM_CREATE_PIT2:
2114 r = -EFAULT;
2115 if (copy_from_user(&u.pit_config, argp,
2116 sizeof(struct kvm_pit_config)))
2117 goto out;
2118 create_pit:
269e05e4
AK
2119 mutex_lock(&kvm->lock);
2120 r = -EEXIST;
2121 if (kvm->arch.vpit)
2122 goto create_pit_unlock;
7837699f 2123 r = -ENOMEM;
c5ff41ce 2124 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
7837699f
SY
2125 if (kvm->arch.vpit)
2126 r = 0;
269e05e4
AK
2127 create_pit_unlock:
2128 mutex_unlock(&kvm->lock);
7837699f 2129 break;
4925663a 2130 case KVM_IRQ_LINE_STATUS:
1fe779f8
CO
2131 case KVM_IRQ_LINE: {
2132 struct kvm_irq_level irq_event;
2133
2134 r = -EFAULT;
2135 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2136 goto out;
2137 if (irqchip_in_kernel(kvm)) {
4925663a 2138 __s32 status;
fa40a821 2139 mutex_lock(&kvm->irq_lock);
4925663a
GN
2140 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2141 irq_event.irq, irq_event.level);
fa40a821 2142 mutex_unlock(&kvm->irq_lock);
4925663a
GN
2143 if (ioctl == KVM_IRQ_LINE_STATUS) {
2144 irq_event.status = status;
2145 if (copy_to_user(argp, &irq_event,
2146 sizeof irq_event))
2147 goto out;
2148 }
1fe779f8
CO
2149 r = 0;
2150 }
2151 break;
2152 }
2153 case KVM_GET_IRQCHIP: {
2154 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 2155 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 2156
f0d66275
DH
2157 r = -ENOMEM;
2158 if (!chip)
1fe779f8 2159 goto out;
f0d66275
DH
2160 r = -EFAULT;
2161 if (copy_from_user(chip, argp, sizeof *chip))
2162 goto get_irqchip_out;
1fe779f8
CO
2163 r = -ENXIO;
2164 if (!irqchip_in_kernel(kvm))
f0d66275
DH
2165 goto get_irqchip_out;
2166 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1fe779f8 2167 if (r)
f0d66275 2168 goto get_irqchip_out;
1fe779f8 2169 r = -EFAULT;
f0d66275
DH
2170 if (copy_to_user(argp, chip, sizeof *chip))
2171 goto get_irqchip_out;
1fe779f8 2172 r = 0;
f0d66275
DH
2173 get_irqchip_out:
2174 kfree(chip);
2175 if (r)
2176 goto out;
1fe779f8
CO
2177 break;
2178 }
2179 case KVM_SET_IRQCHIP: {
2180 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 2181 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 2182
f0d66275
DH
2183 r = -ENOMEM;
2184 if (!chip)
1fe779f8 2185 goto out;
f0d66275
DH
2186 r = -EFAULT;
2187 if (copy_from_user(chip, argp, sizeof *chip))
2188 goto set_irqchip_out;
1fe779f8
CO
2189 r = -ENXIO;
2190 if (!irqchip_in_kernel(kvm))
f0d66275
DH
2191 goto set_irqchip_out;
2192 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
1fe779f8 2193 if (r)
f0d66275 2194 goto set_irqchip_out;
1fe779f8 2195 r = 0;
f0d66275
DH
2196 set_irqchip_out:
2197 kfree(chip);
2198 if (r)
2199 goto out;
1fe779f8
CO
2200 break;
2201 }
e0f63cb9 2202 case KVM_GET_PIT: {
e0f63cb9 2203 r = -EFAULT;
f0d66275 2204 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
2205 goto out;
2206 r = -ENXIO;
2207 if (!kvm->arch.vpit)
2208 goto out;
f0d66275 2209 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
e0f63cb9
SY
2210 if (r)
2211 goto out;
2212 r = -EFAULT;
f0d66275 2213 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
2214 goto out;
2215 r = 0;
2216 break;
2217 }
2218 case KVM_SET_PIT: {
e0f63cb9 2219 r = -EFAULT;
f0d66275 2220 if (copy_from_user(&u.ps, argp, sizeof u.ps))
e0f63cb9
SY
2221 goto out;
2222 r = -ENXIO;
2223 if (!kvm->arch.vpit)
2224 goto out;
f0d66275 2225 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
e0f63cb9
SY
2226 if (r)
2227 goto out;
2228 r = 0;
2229 break;
2230 }
52d939a0
MT
2231 case KVM_REINJECT_CONTROL: {
2232 struct kvm_reinject_control control;
2233 r = -EFAULT;
2234 if (copy_from_user(&control, argp, sizeof(control)))
2235 goto out;
2236 r = kvm_vm_ioctl_reinject(kvm, &control);
2237 if (r)
2238 goto out;
2239 r = 0;
2240 break;
2241 }
1fe779f8
CO
2242 default:
2243 ;
2244 }
2245out:
2246 return r;
2247}
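 /*
  * Userspace view (a sketch, not kernel code): these VM ioctls are
  * issued on a VM fd obtained from KVM_CREATE_VM, e.g.
  *
  *	struct kvm_pit_config cfg = { .flags = KVM_PIT_SPEAKER_DUMMY };
  *	ioctl(vm_fd, KVM_CREATE_PIT2, &cfg);
  *
  * KVM_CREATE_PIT is the older form; it takes no argument and is
  * handled above by filling in a kvm_pit_config with the dummy
  * speaker flag before jumping to the shared create_pit path.
  */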
2248
a16b043c 2249static void kvm_init_msr_list(void)
043405e1
CO
2250{
2251 u32 dummy[2];
2252 unsigned i, j;
2253
2254 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2255 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2256 continue;
2257 if (j < i)
2258 msrs_to_save[j] = msrs_to_save[i];
2259 j++;
2260 }
2261 num_msrs_to_save = j;
2262}
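 /*
  * msrs_to_save is compacted in place: any MSR that rdmsr_safe()
  * faults on (because this host CPU lacks it) is dropped, so only
  * MSRs that actually exist on the host are reported to userspace.
  */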
2263
bbd9b64e
CO
2264/*
2265 * Only the APIC needs an MMIO device hook, so take the shortcut now.
2266 */
2267static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
92760499
LV
2268 gpa_t addr, int len,
2269 int is_write)
bbd9b64e
CO
2270{
2271 struct kvm_io_device *dev;
2272
ad312c7c
ZX
2273 if (vcpu->arch.apic) {
2274 dev = &vcpu->arch.apic->dev;
d76685c4 2275 if (kvm_iodevice_in_range(dev, addr, len, is_write))
bbd9b64e
CO
2276 return dev;
2277 }
2278 return NULL;
2279}
2280
2281
2282static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
92760499
LV
2283 gpa_t addr, int len,
2284 int is_write)
bbd9b64e
CO
2285{
2286 struct kvm_io_device *dev;
2287
92760499 2288 dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
bbd9b64e 2289 if (dev == NULL)
92760499
LV
2290 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
2291 is_write);
bbd9b64e
CO
2292 return dev;
2293}
2294
cded19f3
HE
2295static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2296 struct kvm_vcpu *vcpu)
bbd9b64e
CO
2297{
2298 void *data = val;
10589a46 2299 int r = X86EMUL_CONTINUE;
bbd9b64e
CO
2300
2301 while (bytes) {
ad312c7c 2302 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e 2303 unsigned offset = addr & (PAGE_SIZE-1);
77c2002e 2304 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
bbd9b64e
CO
2305 int ret;
2306
10589a46
MT
2307 if (gpa == UNMAPPED_GVA) {
2308 r = X86EMUL_PROPAGATE_FAULT;
2309 goto out;
2310 }
77c2002e 2311 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
10589a46
MT
2312 if (ret < 0) {
2313 r = X86EMUL_UNHANDLEABLE;
2314 goto out;
2315 }
bbd9b64e 2316
77c2002e
IE
2317 bytes -= toread;
2318 data += toread;
2319 addr += toread;
bbd9b64e 2320 }
10589a46 2321out:
10589a46 2322 return r;
bbd9b64e 2323}
77c2002e 2324
cded19f3
HE
2325static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2326 struct kvm_vcpu *vcpu)
77c2002e
IE
2327{
2328 void *data = val;
2329 int r = X86EMUL_CONTINUE;
2330
2331 while (bytes) {
2332 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2333 unsigned offset = addr & (PAGE_SIZE-1);
2334 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2335 int ret;
2336
2337 if (gpa == UNMAPPED_GVA) {
2338 r = X86EMUL_PROPAGATE_FAULT;
2339 goto out;
2340 }
2341 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2342 if (ret < 0) {
2343 r = X86EMUL_UNHANDLEABLE;
2344 goto out;
2345 }
2346
2347 bytes -= towrite;
2348 data += towrite;
2349 addr += towrite;
2350 }
2351out:
2352 return r;
2353}
2354
bbd9b64e 2355
bbd9b64e
CO
2356static int emulator_read_emulated(unsigned long addr,
2357 void *val,
2358 unsigned int bytes,
2359 struct kvm_vcpu *vcpu)
2360{
2361 struct kvm_io_device *mmio_dev;
2362 gpa_t gpa;
2363
2364 if (vcpu->mmio_read_completed) {
2365 memcpy(val, vcpu->mmio_data, bytes);
2366 vcpu->mmio_read_completed = 0;
2367 return X86EMUL_CONTINUE;
2368 }
2369
ad312c7c 2370 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e
CO
2371
2372 /* For APIC access vmexit */
2373 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2374 goto mmio;
2375
77c2002e
IE
2376 if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2377 == X86EMUL_CONTINUE)
bbd9b64e
CO
2378 return X86EMUL_CONTINUE;
2379 if (gpa == UNMAPPED_GVA)
2380 return X86EMUL_PROPAGATE_FAULT;
2381
2382mmio:
2383 /*
2384 * Is this MMIO handled locally?
2385 */
10589a46 2386 mutex_lock(&vcpu->kvm->lock);
92760499 2387 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
fa40a821 2388 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2389 if (mmio_dev) {
2390 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
2391 return X86EMUL_CONTINUE;
2392 }
2393
2394 vcpu->mmio_needed = 1;
2395 vcpu->mmio_phys_addr = gpa;
2396 vcpu->mmio_size = bytes;
2397 vcpu->mmio_is_write = 0;
2398
2399 return X86EMUL_UNHANDLEABLE;
2400}
2401
3200f405 2402int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
9f811285 2403 const void *val, int bytes)
bbd9b64e
CO
2404{
2405 int ret;
2406
2407 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
9f811285 2408 if (ret < 0)
bbd9b64e 2409 return 0;
ad218f85 2410 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
bbd9b64e
CO
2411 return 1;
2412}
2413
2414static int emulator_write_emulated_onepage(unsigned long addr,
2415 const void *val,
2416 unsigned int bytes,
2417 struct kvm_vcpu *vcpu)
2418{
2419 struct kvm_io_device *mmio_dev;
10589a46
MT
2420 gpa_t gpa;
2421
10589a46 2422 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e
CO
2423
2424 if (gpa == UNMAPPED_GVA) {
c3c91fee 2425 kvm_inject_page_fault(vcpu, addr, 2);
bbd9b64e
CO
2426 return X86EMUL_PROPAGATE_FAULT;
2427 }
2428
2429 /* For APIC access vmexit */
2430 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2431 goto mmio;
2432
2433 if (emulator_write_phys(vcpu, gpa, val, bytes))
2434 return X86EMUL_CONTINUE;
2435
2436mmio:
2437 /*
2438 * Is this MMIO handled locally?
2439 */
10589a46 2440 mutex_lock(&vcpu->kvm->lock);
92760499 2441 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
fa40a821 2442 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2443 if (mmio_dev) {
2444 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
2445 return X86EMUL_CONTINUE;
2446 }
2447
2448 vcpu->mmio_needed = 1;
2449 vcpu->mmio_phys_addr = gpa;
2450 vcpu->mmio_size = bytes;
2451 vcpu->mmio_is_write = 1;
2452 memcpy(vcpu->mmio_data, val, bytes);
2453
2454 return X86EMUL_CONTINUE;
2455}
2456
2457int emulator_write_emulated(unsigned long addr,
2458 const void *val,
2459 unsigned int bytes,
2460 struct kvm_vcpu *vcpu)
2461{
2462 /* Crossing a page boundary? */
2463 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2464 int rc, now;
2465
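 		/*
 		 * Bytes left on the first page: e.g. for addr == 0x1ffd
 		 * with 4K pages, -addr & ~PAGE_MASK == 3, so the first
 		 * chunk covers the 3 bytes up to the page boundary.
 		 */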
2466 now = -addr & ~PAGE_MASK;
2467 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2468 if (rc != X86EMUL_CONTINUE)
2469 return rc;
2470 addr += now;
2471 val += now;
2472 bytes -= now;
2473 }
2474 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2475}
2476EXPORT_SYMBOL_GPL(emulator_write_emulated);
2477
2478static int emulator_cmpxchg_emulated(unsigned long addr,
2479 const void *old,
2480 const void *new,
2481 unsigned int bytes,
2482 struct kvm_vcpu *vcpu)
2483{
2484 static int reported;
2485
2486 if (!reported) {
2487 reported = 1;
2488 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2489 }
2bacc55c
MT
2490#ifndef CONFIG_X86_64
2491 /* a guest's cmpxchg8b has to be emulated atomically */
2492 if (bytes == 8) {
10589a46 2493 gpa_t gpa;
2bacc55c 2494 struct page *page;
c0b49b0d 2495 char *kaddr;
2bacc55c
MT
2496 u64 val;
2497
10589a46
MT
2498 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2499
2bacc55c
MT
2500 if (gpa == UNMAPPED_GVA ||
2501 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2502 goto emul_write;
2503
2504 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2505 goto emul_write;
2506
2507 val = *(u64 *)new;
72dc67a6 2508
2bacc55c 2509 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
72dc67a6 2510
c0b49b0d
AM
2511 kaddr = kmap_atomic(page, KM_USER0);
2512 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2513 kunmap_atomic(kaddr, KM_USER0);
2bacc55c
MT
2514 kvm_release_page_dirty(page);
2515 }
3200f405 2516emul_write:
2bacc55c
MT
2517#endif
2518
bbd9b64e
CO
2519 return emulator_write_emulated(addr, new, bytes, vcpu);
2520}
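 /*
  * Emulating cmpxchg as an unconditional write is a deliberate
  * simplification: the compare is skipped, which an SMP guest could
  * in principle observe, and the one-shot printk above records that
  * the degraded path was taken. The 32-bit host special-cases
  * cmpxchg8b because an 8-byte copy there would otherwise be torn
  * into two 4-byte stores.
  */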
2521
2522static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2523{
2524 return kvm_x86_ops->get_segment_base(vcpu, seg);
2525}
2526
2527int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2528{
a7052897 2529 kvm_mmu_invlpg(vcpu, address);
bbd9b64e
CO
2530 return X86EMUL_CONTINUE;
2531}
2532
2533int emulate_clts(struct kvm_vcpu *vcpu)
2534{
54e445ca 2535 KVMTRACE_0D(CLTS, vcpu, handler);
ad312c7c 2536 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
bbd9b64e
CO
2537 return X86EMUL_CONTINUE;
2538}
2539
2540int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2541{
2542 struct kvm_vcpu *vcpu = ctxt->vcpu;
2543
2544 switch (dr) {
2545 case 0 ... 3:
2546 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2547 return X86EMUL_CONTINUE;
2548 default:
b8688d51 2549 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
bbd9b64e
CO
2550 return X86EMUL_UNHANDLEABLE;
2551 }
2552}
2553
2554int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2555{
2556 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2557 int exception;
2558
2559 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2560 if (exception) {
2561 /* FIXME: better handling */
2562 return X86EMUL_UNHANDLEABLE;
2563 }
2564 return X86EMUL_CONTINUE;
2565}
2566
2567void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2568{
bbd9b64e 2569 u8 opcodes[4];
5fdbf976 2570 unsigned long rip = kvm_rip_read(vcpu);
bbd9b64e
CO
2571 unsigned long rip_linear;
2572
f76c710d 2573 if (!printk_ratelimit())
bbd9b64e
CO
2574 return;
2575
25be4608
GC
2576 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2577
77c2002e 2578 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
bbd9b64e
CO
2579
2580 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2581 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
bbd9b64e
CO
2582}
2583EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2584
14af3f3c 2585static struct x86_emulate_ops emulate_ops = {
77c2002e 2586 .read_std = kvm_read_guest_virt,
bbd9b64e
CO
2587 .read_emulated = emulator_read_emulated,
2588 .write_emulated = emulator_write_emulated,
2589 .cmpxchg_emulated = emulator_cmpxchg_emulated,
2590};
2591
5fdbf976
MT
2592static void cache_all_regs(struct kvm_vcpu *vcpu)
2593{
2594 kvm_register_read(vcpu, VCPU_REGS_RAX);
2595 kvm_register_read(vcpu, VCPU_REGS_RSP);
2596 kvm_register_read(vcpu, VCPU_REGS_RIP);
2597 vcpu->arch.regs_dirty = ~0;
2598}
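 /*
  * Registers that the hardware keeps in the VMCS/VMCB (notably RSP
  * and RIP) are read lazily; reading them here pulls them into
  * vcpu->arch.regs, and regs_dirty = ~0 forces everything to be
  * written back afterwards, since the emulator pokes ->regs directly.
  */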
2599
bbd9b64e
CO
2600int emulate_instruction(struct kvm_vcpu *vcpu,
2601 struct kvm_run *run,
2602 unsigned long cr2,
2603 u16 error_code,
571008da 2604 int emulation_type)
bbd9b64e 2605{
310b5d30 2606 int r, shadow_mask;
571008da 2607 struct decode_cache *c;
bbd9b64e 2608
26eef70c 2609 kvm_clear_exception_queue(vcpu);
ad312c7c 2610 vcpu->arch.mmio_fault_cr2 = cr2;
5fdbf976
MT
2611 /*
2612 * TODO: fix x86_emulate.c to use guest_read/write_register
2613 * instead of direct ->regs accesses; this can save a few hundred
2614 * cycles on Intel for instructions that don't read/change RSP,
2615 * for example.
2616 */
2617 cache_all_regs(vcpu);
bbd9b64e
CO
2618
2619 vcpu->mmio_is_write = 0;
ad312c7c 2620 vcpu->arch.pio.string = 0;
bbd9b64e 2621
571008da 2622 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
bbd9b64e
CO
2623 int cs_db, cs_l;
2624 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2625
ad312c7c
ZX
2626 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2627 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2628 vcpu->arch.emulate_ctxt.mode =
2629 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
bbd9b64e
CO
2630 ? X86EMUL_MODE_REAL : cs_l
2631 ? X86EMUL_MODE_PROT64 : cs_db
2632 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2633
ad312c7c 2634 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
571008da
SY
2635
2636 /* Reject instructions other than VMCALL/VMMCALL when
2637 * trying to emulate an invalid opcode */
2638 c = &vcpu->arch.emulate_ctxt.decode;
2639 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2640 (!(c->twobyte && c->b == 0x01 &&
2641 (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2642 c->modrm_mod == 3 && c->modrm_rm == 1)))
2643 return EMULATE_FAIL;
2644
f2b5756b 2645 ++vcpu->stat.insn_emulation;
bbd9b64e 2646 if (r) {
f2b5756b 2647 ++vcpu->stat.insn_emulation_fail;
bbd9b64e
CO
2648 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2649 return EMULATE_DONE;
2650 return EMULATE_FAIL;
2651 }
2652 }
2653
ba8afb6b
GN
2654 if (emulation_type & EMULTYPE_SKIP) {
2655 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
2656 return EMULATE_DONE;
2657 }
2658
ad312c7c 2659 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
310b5d30
GC
2660 shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
2661
2662 if (r == 0)
2663 kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
bbd9b64e 2664
ad312c7c 2665 if (vcpu->arch.pio.string)
bbd9b64e
CO
2666 return EMULATE_DO_MMIO;
2667
2668 if ((r || vcpu->mmio_is_write) && run) {
2669 run->exit_reason = KVM_EXIT_MMIO;
2670 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2671 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2672 run->mmio.len = vcpu->mmio_size;
2673 run->mmio.is_write = vcpu->mmio_is_write;
2674 }
2675
2676 if (r) {
2677 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2678 return EMULATE_DONE;
2679 if (!vcpu->mmio_needed) {
2680 kvm_report_emulation_failure(vcpu, "mmio");
2681 return EMULATE_FAIL;
2682 }
2683 return EMULATE_DO_MMIO;
2684 }
2685
ad312c7c 2686 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
bbd9b64e
CO
2687
2688 if (vcpu->mmio_is_write) {
2689 vcpu->mmio_needed = 0;
2690 return EMULATE_DO_MMIO;
2691 }
2692
2693 return EMULATE_DONE;
2694}
2695EXPORT_SYMBOL_GPL(emulate_instruction);
2696
de7d789a
CO
2697static int pio_copy_data(struct kvm_vcpu *vcpu)
2698{
ad312c7c 2699 void *p = vcpu->arch.pio_data;
0f346074 2700 gva_t q = vcpu->arch.pio.guest_gva;
de7d789a 2701 unsigned bytes;
0f346074 2702 int ret;
de7d789a 2703
ad312c7c
ZX
2704 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2705 if (vcpu->arch.pio.in)
0f346074 2706 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
de7d789a 2707 else
0f346074
IE
2708 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2709 return ret;
de7d789a
CO
2710}
2711
2712int complete_pio(struct kvm_vcpu *vcpu)
2713{
ad312c7c 2714 struct kvm_pio_request *io = &vcpu->arch.pio;
de7d789a
CO
2715 long delta;
2716 int r;
5fdbf976 2717 unsigned long val;
de7d789a
CO
2718
2719 if (!io->string) {
5fdbf976
MT
2720 if (io->in) {
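 			/*
 			 * Read RAX first and overwrite only io->size
 			 * bytes of it, preserving the upper bytes the
 			 * way an 8- or 16-bit IN would.
 			 */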
2721 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2722 memcpy(&val, vcpu->arch.pio_data, io->size);
2723 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2724 }
de7d789a
CO
2725 } else {
2726 if (io->in) {
2727 r = pio_copy_data(vcpu);
5fdbf976 2728 if (r)
de7d789a 2729 return r;
de7d789a
CO
2730 }
2731
2732 delta = 1;
2733 if (io->rep) {
2734 delta *= io->cur_count;
2735 /*
2736 * The size of the register should really depend on
2737 * current address size.
2738 */
5fdbf976
MT
2739 val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2740 val -= delta;
2741 kvm_register_write(vcpu, VCPU_REGS_RCX, val);
de7d789a
CO
2742 }
2743 if (io->down)
2744 delta = -delta;
2745 delta *= io->size;
5fdbf976
MT
2746 if (io->in) {
2747 val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2748 val += delta;
2749 kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2750 } else {
2751 val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2752 val += delta;
2753 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2754 }
de7d789a
CO
2755 }
2756
de7d789a
CO
2757 io->count -= io->cur_count;
2758 io->cur_count = 0;
2759
2760 return 0;
2761}
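 /*
  * For string PIO the architectural side effects are applied here:
  * with a REP prefix RCX drops by the number of elements completed,
  * and RSI (for OUTS) or RDI (for INS) moves by elements * size,
  * negated when the direction flag made io->down true.
  */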
2762
2763static void kernel_pio(struct kvm_io_device *pio_dev,
2764 struct kvm_vcpu *vcpu,
2765 void *pd)
2766{
2767 /* TODO: String I/O for in kernel device */
2768
ad312c7c
ZX
2769 if (vcpu->arch.pio.in)
2770 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2771 vcpu->arch.pio.size,
de7d789a
CO
2772 pd);
2773 else
ad312c7c
ZX
2774 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2775 vcpu->arch.pio.size,
de7d789a 2776 pd);
de7d789a
CO
2777}
2778
2779static void pio_string_write(struct kvm_io_device *pio_dev,
2780 struct kvm_vcpu *vcpu)
2781{
ad312c7c
ZX
2782 struct kvm_pio_request *io = &vcpu->arch.pio;
2783 void *pd = vcpu->arch.pio_data;
de7d789a
CO
2784 int i;
2785
de7d789a
CO
2786 for (i = 0; i < io->cur_count; i++) {
2787 kvm_iodevice_write(pio_dev, io->port,
2788 io->size,
2789 pd);
2790 pd += io->size;
2791 }
de7d789a
CO
2792}
2793
2794static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
92760499
LV
2795 gpa_t addr, int len,
2796 int is_write)
de7d789a 2797{
92760499 2798 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
de7d789a
CO
2799}
2800
2801int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2802 int size, unsigned port)
2803{
2804 struct kvm_io_device *pio_dev;
5fdbf976 2805 unsigned long val;
de7d789a
CO
2806
2807 vcpu->run->exit_reason = KVM_EXIT_IO;
2808 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2809 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2810 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2811 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2812 vcpu->run->io.port = vcpu->arch.pio.port = port;
2813 vcpu->arch.pio.in = in;
2814 vcpu->arch.pio.string = 0;
2815 vcpu->arch.pio.down = 0;
ad312c7c 2816 vcpu->arch.pio.rep = 0;
de7d789a 2817
2714d1d3
FEL
2818 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2819 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2820 handler);
2821 else
2822 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2823 handler);
2824
5fdbf976
MT
2825 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2826 memcpy(vcpu->arch.pio_data, &val, 4);
de7d789a 2827
fa40a821 2828 mutex_lock(&vcpu->kvm->lock);
92760499 2829 pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
fa40a821 2830 mutex_unlock(&vcpu->kvm->lock);
de7d789a 2831 if (pio_dev) {
ad312c7c 2832 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
de7d789a
CO
2833 complete_pio(vcpu);
2834 return 1;
2835 }
2836 return 0;
2837}
2838EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2839
2840int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2841 int size, unsigned long count, int down,
2842 gva_t address, int rep, unsigned port)
2843{
2844 unsigned now, in_page;
0f346074 2845 int ret = 0;
de7d789a
CO
2846 struct kvm_io_device *pio_dev;
2847
2848 vcpu->run->exit_reason = KVM_EXIT_IO;
2849 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2850 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2851 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2852 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2853 vcpu->run->io.port = vcpu->arch.pio.port = port;
2854 vcpu->arch.pio.in = in;
2855 vcpu->arch.pio.string = 1;
2856 vcpu->arch.pio.down = down;
ad312c7c 2857 vcpu->arch.pio.rep = rep;
de7d789a 2858
2714d1d3
FEL
2859 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2860 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2861 handler);
2862 else
2863 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2864 handler);
2865
de7d789a
CO
2866 if (!count) {
2867 kvm_x86_ops->skip_emulated_instruction(vcpu);
2868 return 1;
2869 }
2870
2871 if (!down)
2872 in_page = PAGE_SIZE - offset_in_page(address);
2873 else
2874 in_page = offset_in_page(address) + size;
2875 now = min(count, (unsigned long)in_page / size);
0f346074 2876 if (!now)
de7d789a 2877 now = 1;
de7d789a
CO
2878 if (down) {
2879 /*
2880 * String I/O in reverse. Yuck. Kill the guest, fix later.
2881 */
2882 pr_unimpl(vcpu, "guest string pio down\n");
c1a5d4f9 2883 kvm_inject_gp(vcpu, 0);
de7d789a
CO
2884 return 1;
2885 }
2886 vcpu->run->io.count = now;
ad312c7c 2887 vcpu->arch.pio.cur_count = now;
de7d789a 2888
ad312c7c 2889 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
de7d789a
CO
2890 kvm_x86_ops->skip_emulated_instruction(vcpu);
2891
0f346074 2892 vcpu->arch.pio.guest_gva = address;
de7d789a 2893
fa40a821 2894 mutex_lock(&vcpu->kvm->lock);
92760499
LV
2895 pio_dev = vcpu_find_pio_dev(vcpu, port,
2896 vcpu->arch.pio.cur_count,
2897 !vcpu->arch.pio.in);
fa40a821
MT
2898 mutex_unlock(&vcpu->kvm->lock);
2899
ad312c7c 2900 if (!vcpu->arch.pio.in) {
de7d789a
CO
2901 /* string PIO write */
2902 ret = pio_copy_data(vcpu);
0f346074
IE
2903 if (ret == X86EMUL_PROPAGATE_FAULT) {
2904 kvm_inject_gp(vcpu, 0);
2905 return 1;
2906 }
2907 if (ret == 0 && pio_dev) {
de7d789a
CO
2908 pio_string_write(pio_dev, vcpu);
2909 complete_pio(vcpu);
ad312c7c 2910 if (vcpu->arch.pio.count == 0)
de7d789a
CO
2911 ret = 1;
2912 }
2913 } else if (pio_dev)
2914 pr_unimpl(vcpu, "no string pio read support yet, "
2915 "port %x size %d count %ld\n",
2916 port, size, count);
2917
2918 return ret;
2919}
2920EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2921
c8076604
GH
2922static void bounce_off(void *info)
2923{
2924 /* nothing */
2925}
2926
2927static unsigned int ref_freq;
2928static unsigned long tsc_khz_ref;
2929
2930static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
2931 void *data)
2932{
2933 struct cpufreq_freqs *freq = data;
2934 struct kvm *kvm;
2935 struct kvm_vcpu *vcpu;
2936 int i, send_ipi = 0;
2937
2938 if (!ref_freq)
2939 ref_freq = freq->old;
2940
2941 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
2942 return 0;
2943 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
2944 return 0;
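 	/*
 	 * The two early returns pick the safe side of each transition:
 	 * a rise is recorded at PRECHANGE, a drop at POSTCHANGE, so the
 	 * scaled cpu_tsc_khz never understates the real TSC rate and
 	 * guest time cannot run ahead and later jump backwards.
 	 */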
2945 per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
2946
2947 spin_lock(&kvm_lock);
2948 list_for_each_entry(kvm, &vm_list, vm_list) {
2949 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2950 vcpu = kvm->vcpus[i];
2951 if (!vcpu)
2952 continue;
2953 if (vcpu->cpu != freq->cpu)
2954 continue;
2955 if (!kvm_request_guest_time_update(vcpu))
2956 continue;
2957 if (vcpu->cpu != smp_processor_id())
2958 send_ipi++;
2959 }
2960 }
2961 spin_unlock(&kvm_lock);
2962
2963 if (freq->old < freq->new && send_ipi) {
2964 /*
2965 * We are upscaling the frequency. We must make sure the guest
2966 * doesn't see old kvmclock values while running with the new
2967 * frequency; otherwise we risk that the guest sees time go
2968 * backwards.
2969 *
2970 * In case we update the frequency for another cpu
2971 * (which might be in guest context) send an interrupt
2972 * to kick the cpu out of guest context. Next time
2973 * guest context is entered kvmclock will be updated,
2974 * so the guest will not see stale values.
2975 */
2976 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
2977 }
2978 return 0;
2979}
2980
2981static struct notifier_block kvmclock_cpufreq_notifier_block = {
2982 .notifier_call = kvmclock_cpufreq_notifier
2983};
2984
f8c16bba 2985int kvm_arch_init(void *opaque)
043405e1 2986{
c8076604 2987 int r, cpu;
f8c16bba
ZX
2988 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2989
f8c16bba
ZX
2990 if (kvm_x86_ops) {
2991 printk(KERN_ERR "kvm: already loaded the other module\n");
56c6d28a
ZX
2992 r = -EEXIST;
2993 goto out;
f8c16bba
ZX
2994 }
2995
2996 if (!ops->cpu_has_kvm_support()) {
2997 printk(KERN_ERR "kvm: no hardware support\n");
56c6d28a
ZX
2998 r = -EOPNOTSUPP;
2999 goto out;
f8c16bba
ZX
3000 }
3001 if (ops->disabled_by_bios()) {
3002 printk(KERN_ERR "kvm: disabled by bios\n");
56c6d28a
ZX
3003 r = -EOPNOTSUPP;
3004 goto out;
f8c16bba
ZX
3005 }
3006
97db56ce
AK
3007 r = kvm_mmu_module_init();
3008 if (r)
3009 goto out;
3010
3011 kvm_init_msr_list();
3012
f8c16bba 3013 kvm_x86_ops = ops;
56c6d28a 3014 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
7b52345e
SY
3015 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
3016 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4b12f0de 3017 PT_DIRTY_MASK, PT64_NX_MASK, 0);
c8076604
GH
3018
3019 for_each_possible_cpu(cpu)
3020 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
3021 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
3022 tsc_khz_ref = tsc_khz;
3023 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
3024 CPUFREQ_TRANSITION_NOTIFIER);
3025 }
3026
f8c16bba 3027 return 0;
56c6d28a
ZX
3028
3029out:
56c6d28a 3030 return r;
043405e1 3031}
8776e519 3032
f8c16bba
ZX
3033void kvm_arch_exit(void)
3034{
888d256e
JK
3035 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
3036 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
3037 CPUFREQ_TRANSITION_NOTIFIER);
f8c16bba 3038 kvm_x86_ops = NULL;
56c6d28a
ZX
3039 kvm_mmu_module_exit();
3040}
f8c16bba 3041
8776e519
HB
3042int kvm_emulate_halt(struct kvm_vcpu *vcpu)
3043{
3044 ++vcpu->stat.halt_exits;
2714d1d3 3045 KVMTRACE_0D(HLT, vcpu, handler);
8776e519 3046 if (irqchip_in_kernel(vcpu->kvm)) {
a4535290 3047 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
8776e519
HB
3048 return 1;
3049 } else {
3050 vcpu->run->exit_reason = KVM_EXIT_HLT;
3051 return 0;
3052 }
3053}
3054EXPORT_SYMBOL_GPL(kvm_emulate_halt);
3055
2f333bcb
MT
3056static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
3057 unsigned long a1)
3058{
3059 if (is_long_mode(vcpu))
3060 return a0;
3061 else
3062 return a0 | ((gpa_t)a1 << 32);
3063}
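 /*
  * A 32-bit guest cannot pass a 64-bit gpa in one register, so the
  * hypercall ABI splits it: a0 carries the low 32 bits and a1 the
  * high 32 bits; a long-mode guest passes the whole gpa in a0.
  */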
3064
8776e519
HB
3065int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
3066{
3067 unsigned long nr, a0, a1, a2, a3, ret;
2f333bcb 3068 int r = 1;
8776e519 3069
5fdbf976
MT
3070 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
3071 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
3072 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
3073 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
3074 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
8776e519 3075
2714d1d3
FEL
3076 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
3077
8776e519
HB
3078 if (!is_long_mode(vcpu)) {
3079 nr &= 0xFFFFFFFF;
3080 a0 &= 0xFFFFFFFF;
3081 a1 &= 0xFFFFFFFF;
3082 a2 &= 0xFFFFFFFF;
3083 a3 &= 0xFFFFFFFF;
3084 }
3085
3086 switch (nr) {
b93463aa
AK
3087 case KVM_HC_VAPIC_POLL_IRQ:
3088 ret = 0;
3089 break;
2f333bcb
MT
3090 case KVM_HC_MMU_OP:
3091 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
3092 break;
8776e519
HB
3093 default:
3094 ret = -KVM_ENOSYS;
3095 break;
3096 }
5fdbf976 3097 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
f11c3a8d 3098 ++vcpu->stat.hypercalls;
2f333bcb 3099 return r;
8776e519
HB
3100}
3101EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
3102
3103int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3104{
3105 char instruction[3];
3106 int ret = 0;
5fdbf976 3107 unsigned long rip = kvm_rip_read(vcpu);
8776e519 3108
8776e519
HB
3109
3110 /*
3111 * Blow out the MMU so that no other VCPU keeps an active mapping;
3112 * this ensures the updated hypercall appears atomically across all
3113 * VCPUs.
3114 */
3115 kvm_mmu_zap_all(vcpu->kvm);
3116
8776e519 3117 kvm_x86_ops->patch_hypercall(vcpu, instruction);
5fdbf976 3118 if (emulator_write_emulated(rip, instruction, 3, vcpu)
8776e519
HB
3119 != X86EMUL_CONTINUE)
3120 ret = -EFAULT;
3121
8776e519
HB
3122 return ret;
3123}
3124
3125static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3126{
3127 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3128}
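 /*
  * Example: mk_cr_64(0x1234ffffffff, 0x80000011) == 0x123480000011;
  * the 32-bit value from the emulator replaces only the low half of
  * the control register.
  */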
3129
3130void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3131{
3132 struct descriptor_table dt = { limit, base };
3133
3134 kvm_x86_ops->set_gdt(vcpu, &dt);
3135}
3136
3137void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3138{
3139 struct descriptor_table dt = { limit, base };
3140
3141 kvm_x86_ops->set_idt(vcpu, &dt);
3142}
3143
3144void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
3145 unsigned long *rflags)
3146{
2d3ad1f4 3147 kvm_lmsw(vcpu, msw);
8776e519
HB
3148 *rflags = kvm_x86_ops->get_rflags(vcpu);
3149}
3150
3151unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3152{
54e445ca
JR
3153 unsigned long value;
3154
8776e519
HB
3155 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3156 switch (cr) {
3157 case 0:
54e445ca
JR
3158 value = vcpu->arch.cr0;
3159 break;
8776e519 3160 case 2:
54e445ca
JR
3161 value = vcpu->arch.cr2;
3162 break;
8776e519 3163 case 3:
54e445ca
JR
3164 value = vcpu->arch.cr3;
3165 break;
8776e519 3166 case 4:
54e445ca
JR
3167 value = vcpu->arch.cr4;
3168 break;
152ff9be 3169 case 8:
54e445ca
JR
3170 value = kvm_get_cr8(vcpu);
3171 break;
8776e519 3172 default:
b8688d51 3173 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
3174 return 0;
3175 }
54e445ca
JR
3176 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
3177 (u32)((u64)value >> 32), handler);
3178
3179 return value;
8776e519
HB
3180}
3181
3182void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3183 unsigned long *rflags)
3184{
54e445ca
JR
3185 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
3186 (u32)((u64)val >> 32), handler);
3187
8776e519
HB
3188 switch (cr) {
3189 case 0:
2d3ad1f4 3190 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
8776e519
HB
3191 *rflags = kvm_x86_ops->get_rflags(vcpu);
3192 break;
3193 case 2:
ad312c7c 3194 vcpu->arch.cr2 = val;
8776e519
HB
3195 break;
3196 case 3:
2d3ad1f4 3197 kvm_set_cr3(vcpu, val);
8776e519
HB
3198 break;
3199 case 4:
2d3ad1f4 3200 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
8776e519 3201 break;
152ff9be 3202 case 8:
2d3ad1f4 3203 kvm_set_cr8(vcpu, val & 0xfUL);
152ff9be 3204 break;
8776e519 3205 default:
b8688d51 3206 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
3207 }
3208}
3209
07716717
DK
3210static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
3211{
ad312c7c
ZX
3212 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
3213 int j, nent = vcpu->arch.cpuid_nent;
07716717
DK
3214
3215 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
3216 /* when no next entry is found, the current entry[i] is reselected */
0fdf8e59 3217 for (j = i + 1; ; j = (j + 1) % nent) {
ad312c7c 3218 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
07716717
DK
3219 if (ej->function == e->function) {
3220 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
3221 return j;
3222 }
3223 }
3224 return 0; /* silence gcc, even though control never reaches here */
3225}
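 /*
  * Stateful CPUID functions (e.g. leaf 2 on older Intel CPUs) return
  * a different entry on each invocation; the READ_NEXT flag is walked
  * round-robin through all entries that share the function number.
  */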
3226
3227/* find an entry with matching function, matching index (if needed), and that
3228 * should be read next (if it's stateful) */
3229static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
3230 u32 function, u32 index)
3231{
3232 if (e->function != function)
3233 return 0;
3234 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
3235 return 0;
3236 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
19355475 3237 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
07716717
DK
3238 return 0;
3239 return 1;
3240}
3241
d8017474
AG
3242struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3243 u32 function, u32 index)
8776e519
HB
3244{
3245 int i;
d8017474 3246 struct kvm_cpuid_entry2 *best = NULL;
8776e519 3247
ad312c7c 3248 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
d8017474
AG
3249 struct kvm_cpuid_entry2 *e;
3250
ad312c7c 3251 e = &vcpu->arch.cpuid_entries[i];
07716717
DK
3252 if (is_matching_cpuid_entry(e, function, index)) {
3253 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3254 move_to_next_stateful_cpuid_entry(vcpu, i);
8776e519
HB
3255 best = e;
3256 break;
3257 }
3258 /*
3259 * Both basic or both extended?
3260 */
3261 if (((e->function ^ function) & 0x80000000) == 0)
3262 if (!best || e->function > best->function)
3263 best = e;
3264 }
d8017474
AG
3265 return best;
3266}
3267
82725b20
DE
3268int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3269{
3270 struct kvm_cpuid_entry2 *best;
3271
3272 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3273 if (best)
3274 return best->eax & 0xff;
3275 return 36;
3276}
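 /*
  * CPUID 0x80000008 EAX[7:0] advertises MAXPHYADDR; 36 bits is the
  * classic PAE default assumed when the guest cpuid lacks that leaf.
  */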
3277
d8017474
AG
3278void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3279{
3280 u32 function, index;
3281 struct kvm_cpuid_entry2 *best;
3282
3283 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3284 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3285 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3286 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3287 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3288 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3289 best = kvm_find_cpuid_entry(vcpu, function, index);
8776e519 3290 if (best) {
5fdbf976
MT
3291 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3292 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3293 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3294 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
8776e519 3295 }
8776e519 3296 kvm_x86_ops->skip_emulated_instruction(vcpu);
2714d1d3 3297 KVMTRACE_5D(CPUID, vcpu, function,
5fdbf976
MT
3298 (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
3299 (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
3300 (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
3301 (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
8776e519
HB
3302}
3303EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
d0752060 3304
b6c7a5dc
HB
3305/*
3306 * Check if userspace requested an interrupt window, and that the
3307 * interrupt window is open.
3308 *
3309 * No need to exit to userspace if we already have an interrupt queued.
3310 */
3311static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
3312 struct kvm_run *kvm_run)
3313{
8061823a 3314 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
b6c7a5dc 3315 kvm_run->request_interrupt_window &&
5df56646 3316 kvm_arch_interrupt_allowed(vcpu));
b6c7a5dc
HB
3317}
3318
3319static void post_kvm_run_save(struct kvm_vcpu *vcpu,
3320 struct kvm_run *kvm_run)
3321{
3322 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2d3ad1f4 3323 kvm_run->cr8 = kvm_get_cr8(vcpu);
b6c7a5dc 3324 kvm_run->apic_base = kvm_get_apic_base(vcpu);
4531220b 3325 if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 3326 kvm_run->ready_for_interrupt_injection = 1;
4531220b 3327 else
b6c7a5dc 3328 kvm_run->ready_for_interrupt_injection =
fa9726b0
GN
3329 kvm_arch_interrupt_allowed(vcpu) &&
3330 !kvm_cpu_has_interrupt(vcpu) &&
3331 !kvm_event_needs_reinjection(vcpu);
b6c7a5dc
HB
3332}
3333
b93463aa
AK
3334static void vapic_enter(struct kvm_vcpu *vcpu)
3335{
3336 struct kvm_lapic *apic = vcpu->arch.apic;
3337 struct page *page;
3338
3339 if (!apic || !apic->vapic_addr)
3340 return;
3341
3342 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
72dc67a6
IE
3343
3344 vcpu->arch.apic->vapic_page = page;
b93463aa
AK
3345}
3346
3347static void vapic_exit(struct kvm_vcpu *vcpu)
3348{
3349 struct kvm_lapic *apic = vcpu->arch.apic;
3350
3351 if (!apic || !apic->vapic_addr)
3352 return;
3353
f8b78fa3 3354 down_read(&vcpu->kvm->slots_lock);
b93463aa
AK
3355 kvm_release_page_dirty(apic->vapic_page);
3356 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
f8b78fa3 3357 up_read(&vcpu->kvm->slots_lock);
b93463aa
AK
3358}
3359
95ba8273
GN
3360static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3361{
3362 int max_irr, tpr;
3363
3364 if (!kvm_x86_ops->update_cr8_intercept)
3365 return;
3366
8db3baa2
GN
3367 if (!vcpu->arch.apic->vapic_addr)
3368 max_irr = kvm_lapic_find_highest_irr(vcpu);
3369 else
3370 max_irr = -1;
95ba8273
GN
3371
3372 if (max_irr != -1)
3373 max_irr >>= 4;
3374
3375 tpr = kvm_lapic_get_cr8(vcpu);
3376
3377 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3378}
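 /*
  * An interrupt vector's priority class is its top nibble, so
  * max_irr >> 4 puts the highest pending vector on the same scale as
  * the TPR before both are handed to the intercept hook.
  */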
3379
6a8b1d13 3380static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
95ba8273
GN
3381{
3382 /* try to reinject previous events if any */
3383 if (vcpu->arch.nmi_injected) {
3384 kvm_x86_ops->set_nmi(vcpu);
3385 return;
3386 }
3387
3388 if (vcpu->arch.interrupt.pending) {
66fd3f7f 3389 kvm_x86_ops->set_irq(vcpu);
95ba8273
GN
3390 return;
3391 }
3392
3393 /* try to inject new event if pending */
3394 if (vcpu->arch.nmi_pending) {
3395 if (kvm_x86_ops->nmi_allowed(vcpu)) {
3396 vcpu->arch.nmi_pending = false;
3397 vcpu->arch.nmi_injected = true;
3398 kvm_x86_ops->set_nmi(vcpu);
3399 }
3400 } else if (kvm_cpu_has_interrupt(vcpu)) {
3401 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
66fd3f7f
GN
3402 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
3403 false);
3404 kvm_x86_ops->set_irq(vcpu);
95ba8273
GN
3405 }
3406 }
3407}
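 /*
  * Ordering above: events already taken out of their queues
  * (nmi_injected, interrupt.pending) are replayed first, and a fresh
  * NMI beats a fresh external interrupt, matching architectural
  * event priority.
  */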
3408
d7690175 3409static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
b6c7a5dc
HB
3410{
3411 int r;
6a8b1d13
GN
3412 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
3413 kvm_run->request_interrupt_window;
b6c7a5dc 3414
2e53d63a
MT
3415 if (vcpu->requests)
3416 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3417 kvm_mmu_unload(vcpu);
3418
b6c7a5dc
HB
3419 r = kvm_mmu_reload(vcpu);
3420 if (unlikely(r))
3421 goto out;
3422
2f52d58c
AK
3423 if (vcpu->requests) {
3424 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2f599714 3425 __kvm_migrate_timers(vcpu);
c8076604
GH
3426 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3427 kvm_write_guest_time(vcpu);
4731d4c7
MT
3428 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3429 kvm_mmu_sync_roots(vcpu);
d4acf7e7
MT
3430 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3431 kvm_x86_ops->tlb_flush(vcpu);
b93463aa
AK
3432 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3433 &vcpu->requests)) {
3434 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
3435 r = 0;
3436 goto out;
3437 }
71c4dfaf
JR
3438 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3439 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
3440 r = 0;
3441 goto out;
3442 }
2f52d58c 3443 }
b93463aa 3444
b6c7a5dc
HB
3445 preempt_disable();
3446
3447 kvm_x86_ops->prepare_guest_switch(vcpu);
3448 kvm_load_guest_fpu(vcpu);
3449
3450 local_irq_disable();
3451
32f88400
MT
3452 clear_bit(KVM_REQ_KICK, &vcpu->requests);
3453 smp_mb__after_clear_bit();
3454
d7690175 3455 if (vcpu->requests || need_resched() || signal_pending(current)) {
6c142801
AK
3456 local_irq_enable();
3457 preempt_enable();
3458 r = 1;
3459 goto out;
3460 }
3461
ad312c7c 3462 if (vcpu->arch.exception.pending)
298101da 3463 __queue_exception(vcpu);
eb9774f0 3464 else
95ba8273 3465 inject_pending_irq(vcpu, kvm_run);
b6c7a5dc 3466
6a8b1d13
GN
3467 /* enable NMI/IRQ window open exits if needed */
3468 if (vcpu->arch.nmi_pending)
3469 kvm_x86_ops->enable_nmi_window(vcpu);
3470 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
3471 kvm_x86_ops->enable_irq_window(vcpu);
3472
95ba8273 3473 if (kvm_lapic_enabled(vcpu)) {
8db3baa2
GN
3474 update_cr8_intercept(vcpu);
3475 kvm_lapic_sync_to_vapic(vcpu);
95ba8273 3476 }
b93463aa 3477
3200f405
MT
3478 up_read(&vcpu->kvm->slots_lock);
3479
b6c7a5dc
HB
3480 kvm_guest_enter();
3481
42dbaa5a
JK
3482 get_debugreg(vcpu->arch.host_dr6, 6);
3483 get_debugreg(vcpu->arch.host_dr7, 7);
3484 if (unlikely(vcpu->arch.switch_db_regs)) {
3485 get_debugreg(vcpu->arch.host_db[0], 0);
3486 get_debugreg(vcpu->arch.host_db[1], 1);
3487 get_debugreg(vcpu->arch.host_db[2], 2);
3488 get_debugreg(vcpu->arch.host_db[3], 3);
3489
3490 set_debugreg(0, 7);
3491 set_debugreg(vcpu->arch.eff_db[0], 0);
3492 set_debugreg(vcpu->arch.eff_db[1], 1);
3493 set_debugreg(vcpu->arch.eff_db[2], 2);
3494 set_debugreg(vcpu->arch.eff_db[3], 3);
3495 }
b6c7a5dc 3496
2714d1d3 3497 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
b6c7a5dc
HB
3498 kvm_x86_ops->run(vcpu, kvm_run);
3499
42dbaa5a
JK
3500 if (unlikely(vcpu->arch.switch_db_regs)) {
3501 set_debugreg(0, 7);
3502 set_debugreg(vcpu->arch.host_db[0], 0);
3503 set_debugreg(vcpu->arch.host_db[1], 1);
3504 set_debugreg(vcpu->arch.host_db[2], 2);
3505 set_debugreg(vcpu->arch.host_db[3], 3);
3506 }
3507 set_debugreg(vcpu->arch.host_dr6, 6);
3508 set_debugreg(vcpu->arch.host_dr7, 7);
3509
32f88400 3510 set_bit(KVM_REQ_KICK, &vcpu->requests);
b6c7a5dc
HB
3511 local_irq_enable();
3512
3513 ++vcpu->stat.exits;
3514
3515 /*
3516 * We must have an instruction between local_irq_enable() and
3517 * kvm_guest_exit(), so the timer interrupt isn't delayed by
3518 * the interrupt shadow. The stat.exits increment will do nicely.
3519 * But we need to prevent reordering, hence this barrier():
3520 */
3521 barrier();
3522
3523 kvm_guest_exit();
3524
3525 preempt_enable();
3526
3200f405
MT
3527 down_read(&vcpu->kvm->slots_lock);
3528
b6c7a5dc
HB
3529 /*
3530 * Profile KVM exit RIPs:
3531 */
3532 if (unlikely(prof_on == KVM_PROFILING)) {
5fdbf976
MT
3533 unsigned long rip = kvm_rip_read(vcpu);
3534 profile_hit(KVM_PROFILING, (void *)rip);
b6c7a5dc
HB
3535 }
3536
298101da 3537
b93463aa
AK
3538 kvm_lapic_sync_from_vapic(vcpu);
3539
b6c7a5dc 3540 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
d7690175
MT
3541out:
3542 return r;
3543}
b6c7a5dc 3544
09cec754 3545
d7690175
MT
3546static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3547{
3548 int r;
3549
3550 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
1b10bf31
JK
3551 pr_debug("vcpu %d received sipi with vector # %x\n",
3552 vcpu->vcpu_id, vcpu->arch.sipi_vector);
d7690175 3553 kvm_lapic_reset(vcpu);
5f179287 3554 r = kvm_arch_vcpu_reset(vcpu);
d7690175
MT
3555 if (r)
3556 return r;
3557 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
b6c7a5dc
HB
3558 }
3559
d7690175
MT
3560 down_read(&vcpu->kvm->slots_lock);
3561 vapic_enter(vcpu);
3562
3563 r = 1;
3564 while (r > 0) {
af2152f5 3565 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
d7690175
MT
3566 r = vcpu_enter_guest(vcpu, kvm_run);
3567 else {
3568 up_read(&vcpu->kvm->slots_lock);
3569 kvm_vcpu_block(vcpu);
3570 down_read(&vcpu->kvm->slots_lock);
3571 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
09cec754
GN
3572 {
3573 switch(vcpu->arch.mp_state) {
3574 case KVM_MP_STATE_HALTED:
d7690175 3575 vcpu->arch.mp_state =
09cec754
GN
3576 KVM_MP_STATE_RUNNABLE;
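 				/* fall through */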
3577 case KVM_MP_STATE_RUNNABLE:
3578 break;
3579 case KVM_MP_STATE_SIPI_RECEIVED:
3580 default:
3581 r = -EINTR;
3582 break;
3583 }
3584 }
d7690175
MT
3585 }
3586
09cec754
GN
3587 if (r <= 0)
3588 break;
3589
3590 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3591 if (kvm_cpu_has_pending_timer(vcpu))
3592 kvm_inject_pending_timer_irqs(vcpu);
3593
3594 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3595 r = -EINTR;
3596 kvm_run->exit_reason = KVM_EXIT_INTR;
3597 ++vcpu->stat.request_irq_exits;
3598 }
3599 if (signal_pending(current)) {
3600 r = -EINTR;
3601 kvm_run->exit_reason = KVM_EXIT_INTR;
3602 ++vcpu->stat.signal_exits;
3603 }
3604 if (need_resched()) {
3605 up_read(&vcpu->kvm->slots_lock);
3606 kvm_resched(vcpu);
3607 down_read(&vcpu->kvm->slots_lock);
d7690175 3608 }
b6c7a5dc
HB
3609 }
3610
d7690175 3611 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
3612 post_kvm_run_save(vcpu, kvm_run);
3613
b93463aa
AK
3614 vapic_exit(vcpu);
3615
b6c7a5dc
HB
3616 return r;
3617}
3618
3619int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3620{
3621 int r;
3622 sigset_t sigsaved;
3623
3624 vcpu_load(vcpu);
3625
ac9f6dc0
AK
3626 if (vcpu->sigset_active)
3627 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3628
a4535290 3629 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
b6c7a5dc 3630 kvm_vcpu_block(vcpu);
d7690175 3631 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
ac9f6dc0
AK
3632 r = -EAGAIN;
3633 goto out;
b6c7a5dc
HB
3634 }
3635
b6c7a5dc
HB
3636 /* re-sync apic's tpr */
3637 if (!irqchip_in_kernel(vcpu->kvm))
2d3ad1f4 3638 kvm_set_cr8(vcpu, kvm_run->cr8);
b6c7a5dc 3639
ad312c7c 3640 if (vcpu->arch.pio.cur_count) {
b6c7a5dc
HB
3641 r = complete_pio(vcpu);
3642 if (r)
3643 goto out;
3644 }
3645#ifdef CONFIG_HAS_IOMEM
3646 if (vcpu->mmio_needed) {
3647 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3648 vcpu->mmio_read_completed = 1;
3649 vcpu->mmio_needed = 0;
3200f405
MT
3650
3651 down_read(&vcpu->kvm->slots_lock);
b6c7a5dc 3652 r = emulate_instruction(vcpu, kvm_run,
571008da
SY
3653 vcpu->arch.mmio_fault_cr2, 0,
3654 EMULTYPE_NO_DECODE);
3200f405 3655 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
3656 if (r == EMULATE_DO_MMIO) {
3657 /*
3658 * Read-modify-write. Back to userspace.
3659 */
3660 r = 0;
3661 goto out;
3662 }
3663 }
3664#endif
5fdbf976
MT
3665 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3666 kvm_register_write(vcpu, VCPU_REGS_RAX,
3667 kvm_run->hypercall.ret);
b6c7a5dc
HB
3668
3669 r = __vcpu_run(vcpu, kvm_run);
3670
3671out:
3672 if (vcpu->sigset_active)
3673 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3674
3675 vcpu_put(vcpu);
3676 return r;
3677}
3678
3679int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3680{
3681 vcpu_load(vcpu);
3682
5fdbf976
MT
3683 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3684 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3685 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3686 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3687 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3688 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3689 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3690 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
b6c7a5dc 3691#ifdef CONFIG_X86_64
5fdbf976
MT
3692 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3693 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3694 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3695 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3696 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3697 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3698 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3699 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
b6c7a5dc
HB
3700#endif
3701
5fdbf976 3702 regs->rip = kvm_rip_read(vcpu);
b6c7a5dc
HB
3703 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3704
3705 /*
3706 * Don't leak debug flags in case they were set for guest debugging
3707 */
d0bfb940 3708 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
b6c7a5dc
HB
3709 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3710
3711 vcpu_put(vcpu);
3712
3713 return 0;
3714}
3715
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);

	vcpu->arch.exception.pending = false;

	vcpu_put(vcpu);

	return 0;
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;

	vcpu_load(vcpu);

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->arch.cr0;
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = vcpu->arch.cr4;
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);

	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	vcpu_put(vcpu);
	return 0;
}

static void kvm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

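/*
 * Unpack an 8-byte protected-mode descriptor into the flat kvm_segment
 * layout used by the rest of KVM.  The base address is scattered across
 * three descriptor fields; a set granularity (G) bit scales the 20-bit
 * limit to 4 KiB units, hence the << 12 | 0xfff below.  A null selector
 * makes the segment unusable.
 */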
static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
				   struct kvm_segment *kvm_desct)
{
	kvm_desct->base = seg_desc->base0;
	kvm_desct->base |= seg_desc->base1 << 16;
	kvm_desct->base |= seg_desc->base2 << 24;
	kvm_desct->limit = seg_desc->limit0;
	kvm_desct->limit |= seg_desc->limit << 16;
	if (seg_desc->g) {
		kvm_desct->limit <<= 12;
		kvm_desct->limit |= 0xfff;
	}
	kvm_desct->selector = selector;
	kvm_desct->type = seg_desc->type;
	kvm_desct->present = seg_desc->p;
	kvm_desct->dpl = seg_desc->dpl;
	kvm_desct->db = seg_desc->d;
	kvm_desct->s = seg_desc->s;
	kvm_desct->l = seg_desc->l;
	kvm_desct->g = seg_desc->g;
	kvm_desct->avl = seg_desc->avl;
	if (!selector)
		kvm_desct->unusable = 1;
	else
		kvm_desct->unusable = 0;
	kvm_desct->padding = 0;
}

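/*
 * Bit 2 of a selector is the table indicator (TI): when set, the
 * descriptor lives in the current LDT, otherwise in the GDT.
 */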
static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
					  u16 selector,
					  struct descriptor_table *dtable)
{
	if (selector & 1 << 2) {
		struct kvm_segment kvm_seg;

		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);

		if (kvm_seg.unusable)
			dtable->limit = 0;
		else
			dtable->limit = kvm_seg.limit;
		dtable->base = kvm_seg.base;
	} else
		kvm_x86_ops->get_gdt(vcpu, dtable);
}

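/*
 * A descriptor occupies bytes [index * 8, index * 8 + 7] of its table,
 * so the limit check below rejects any entry that does not fit entirely
 * within the table before it is read from or written back to guest
 * memory.
 */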
/* allowed just for 8-byte segment descriptors */
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
		return 1;
	}
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
}

/* allowed just for 8-byte segment descriptors */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7)
		return 1;
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
}

static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
			     struct desc_struct *seg_desc)
{
	u32 base_addr;

	base_addr = seg_desc->base0;
	base_addr |= (seg_desc->base1 << 16);
	base_addr |= (seg_desc->base2 << 24);

	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
}

static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment kvm_seg;

	kvm_get_segment(vcpu, &kvm_seg, seg);
	return kvm_seg.selector;
}

static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
						u16 selector,
						struct kvm_segment *kvm_seg)
{
	struct desc_struct seg_desc;

	if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
		return 1;
	seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
	return 0;
}

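/*
 * Real mode has no descriptor tables: a segment's base is simply its
 * selector shifted left by four, with a fixed 64 KiB limit and
 * read/write data access.
 */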
static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
	struct kvm_segment segvar = {
		.base = selector << 4,
		.limit = 0xffff,
		.selector = selector,
		.type = 3,
		.present = 1,
		.dpl = 3,
		.db = 0,
		.s = 1,
		.l = 0,
		.g = 0,
		.avl = 0,
		.unusable = 0,
	};
	kvm_x86_ops->set_segment(vcpu, &segvar, seg);
	return 0;
}

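/*
 * Load a segment register from a selector.  type_bits is OR-ed into the
 * descriptor type (e.g. the accessed bit); for registers other than CS,
 * SS and LDTR, a system descriptor (s == 0) marks the segment unusable
 * instead of being loaded.
 */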
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg)
{
	struct kvm_segment kvm_seg;

	if (!(vcpu->arch.cr0 & X86_CR0_PE))
		return kvm_load_realmode_segment(vcpu, selector, seg);
	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
		return 1;
	kvm_seg.type |= type_bits;

	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
	    seg != VCPU_SREG_LDTR)
		if (!kvm_seg.s)
			kvm_seg.unusable = 1;

	kvm_set_segment(vcpu, &kvm_seg, seg);
	return 0;
}

static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = kvm_rip_read(vcpu);
	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
				 struct tss_segment_32 *tss)
{
	kvm_set_cr3(vcpu, tss->cr3);

	kvm_rip_write(vcpu, tss->eip);
	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
		return 1;
	return 0;
}

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
				struct tss_segment_16 *tss)
{
	tss->ip = kvm_rip_read(vcpu);
	tss->flag = kvm_x86_ops->get_rflags(vcpu);
	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
				 struct tss_segment_16 *tss)
{
	kvm_rip_write(vcpu, tss->ip);
	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;
	return 0;
}

static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_16 tss_segment_16;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			   sizeof tss_segment_16))
		goto out;

	save_state_to_tss16(vcpu, &tss_segment_16);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			    sizeof tss_segment_16))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_16, sizeof tss_segment_16))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_16.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_16.prev_task_link,
				    sizeof tss_segment_16.prev_task_link))
			goto out;
	}

	if (load_state_from_tss16(vcpu, &tss_segment_16))
		goto out;

	ret = 1;
out:
	return ret;
}

static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_32 tss_segment_32;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			   sizeof tss_segment_32))
		goto out;

	save_state_to_tss32(vcpu, &tss_segment_32);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			    sizeof tss_segment_32))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_32, sizeof tss_segment_32))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_32.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_32.prev_task_link,
				    sizeof tss_segment_32.prev_task_link))
			goto out;
	}

	if (load_state_from_tss32(vcpu, &tss_segment_32))
		goto out;

	ret = 1;
out:
	return ret;
}

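/*
 * Emulate a hardware task switch: save the outgoing task's registers
 * into its TSS, load the incoming TSS, and for CALLs and task gates
 * point the incoming TSS's back link at the old TSS selector so a later
 * IRET can return.  The busy (B) bit of the old/new TSS descriptors and
 * the EFLAGS.NT flag are updated according to the switch reason.
 */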
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
	struct kvm_segment tr_seg;
	struct desc_struct cseg_desc;
	struct desc_struct nseg_desc;
	int ret = 0;
	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

	/* FIXME: Handle errors. Failure to read either TSS or their
	 * descriptors should generate a page fault.
	 */
	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
		goto out;

	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
		goto out;

	if (reason != TASK_SWITCH_IRET) {
		int cpl;

		cpl = kvm_x86_ops->get_cpl(vcpu);
		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
	}

	if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
		return 1;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		cseg_desc.type &= ~(1 << 1); /* clear the B (busy) flag */
		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
	}

	if (reason == TASK_SWITCH_IRET) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
	}

	/* set back link to prev task only if NT bit is set in eflags;
	 * note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (nseg_desc.type & 8)
		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);
	else
		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
	}

	if (reason != TASK_SWITCH_IRET) {
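		/* mark the incoming TSS descriptor busy (set the B flag) */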
		nseg_desc.type |= (1 << 1);
		save_guest_segment_descriptor(vcpu, tss_selector,
					      &nseg_desc);
	}

	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
	tr_seg.type = 11;
	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

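/*
 * Install a complete set of special registers.  An MMU context reset is
 * needed whenever state that feeds the shadow MMU (CR0, CR3, CR4, EFER)
 * changes, and at most one pending external interrupt is re-queued from
 * interrupt_bitmap.
 */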
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;

	down_read(&vcpu->kvm->slots_lock);
	if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
		vcpu->arch.cr3 = sregs->cr3;
	else
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
	up_read(&vcpu->kvm->slots_lock);

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->arch.cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = (sizeof sregs->interrupt_bitmap) << 3;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
		if (irqchip_in_kernel(vcpu->kvm))
			kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !(vcpu->arch.cr0 & X86_CR0_PE))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	vcpu_put(vcpu);

	return 0;
}

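/*
 * When userspace enables hardware breakpoints, its debug registers
 * become the effective ones; otherwise the guest's own db[]/dr7 remain
 * in effect.  Debug-register switching on entry/exit is only armed
 * while some DR7 breakpoint-enable bit is set.
 */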
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int i, r;

	vcpu_load(vcpu);

	if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	if (dbg->control & KVM_GUESTDBG_INJECT_DB)
		kvm_queue_exception(vcpu, DB_VECTOR);
	else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
		kvm_queue_exception(vcpu, BP_VECTOR);

	vcpu_put(vcpu);

	return r;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
	up_read(&vcpu->kvm->slots_lock);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}

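/*
 * KVM_GET_FPU/KVM_SET_FPU copy between the kvm_fpu userspace ABI and
 * the guest's in-kernel FXSAVE image; the eight x87 stack registers
 * take 16 bytes each, hence the 128-byte copies below.
 */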
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/*
	 * Touch the fpu the first time in a non-atomic context: if this
	 * is the first fpu instruction, the exception handler will fire
	 * before the instruction returns and will have to allocate RAM
	 * with GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_finit();
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
	kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

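/*
 * First-time per-vcpu construction.  The pio_data page is shared with
 * userspace for PIO emulation, and each of the KVM_MAX_MCE_BANKS MCE
 * banks gets four u64 MSRs (CTL, STATUS, ADDR, MISC), hence the * 4 in
 * the kzalloc below.
 */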
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_mmu_destroy;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	kvm_mmu_destroy(vcpu);
	up_read(&vcpu->kvm->slots_lock);
	free_page((unsigned long)vcpu->arch.pio_data);
}

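/*
 * VM-wide construction.  Bit 0 of irq_sources_bitmap is reserved for
 * interrupts injected from userspace, and the host TSC at creation is
 * recorded (presumably as the zero point for guest TSC offsets).
 */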
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(kvm);
}

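/*
 * Legacy slot setup: when userspace did not allocate the backing memory
 * itself (!user_alloc), the kernel mmaps or munmaps anonymous memory on
 * its behalf; current userspace passes its own addresses instead.
 */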
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

	/* To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			/* set userspace_addr atomically for kvm_hva_to_rmapp */
			spin_lock(&kvm->mmu_lock);
			memslot->userspace_addr = userspace_addr;
			spin_unlock(&kvm->mmu_lock);
		} else {
			if (!old.user_alloc && old.rmap) {
				int ret;

				down_write(&current->mm->mmap_sem);
				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				up_write(&current->mm->mmap_sem);
				if (ret < 0)
					printk(KERN_WARNING
					       "kvm_vm_ioctl_set_memory_region: "
					       "failed to munmap memory\n");
			}
		}
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
	kvm_reload_remote_mmus(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending;
}

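/*
 * Kick a vcpu out of guest mode or out of its halt sleep: wake any
 * waiter on the vcpu's waitqueue, then send a reschedule IPI to the
 * remote CPU unless a kick is already pending.
 */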
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
	put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}