]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - arch/x86/kvm/x86.c
KVM: introduce irq_lock, use it to protect ioapic
[mirror_ubuntu-hirsute-kernel.git] / arch / x86 / kvm / x86.c
CommitLineData
043405e1
CO
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * derived from drivers/kvm/kvm_main.c
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
4d5c5d0f
BAY
7 * Copyright (C) 2008 Qumranet, Inc.
8 * Copyright IBM Corporation, 2008
043405e1
CO
9 *
10 * Authors:
11 * Avi Kivity <avi@qumranet.com>
12 * Yaniv Kamay <yaniv@qumranet.com>
4d5c5d0f
BAY
13 * Amit Shah <amit.shah@qumranet.com>
14 * Ben-Ami Yassour <benami@il.ibm.com>
043405e1
CO
15 *
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
18 *
19 */
20
edf88417 21#include <linux/kvm_host.h>
313a3dc7 22#include "irq.h"
1d737c8a 23#include "mmu.h"
7837699f 24#include "i8254.h"
37817f29 25#include "tss.h"
5fdbf976 26#include "kvm_cache_regs.h"
26eef70c 27#include "x86.h"
313a3dc7 28
18068523 29#include <linux/clocksource.h>
4d5c5d0f 30#include <linux/interrupt.h>
313a3dc7
CO
31#include <linux/kvm.h>
32#include <linux/fs.h>
33#include <linux/vmalloc.h>
5fb76f9b 34#include <linux/module.h>
0de10343 35#include <linux/mman.h>
2bacc55c 36#include <linux/highmem.h>
19de40a8 37#include <linux/iommu.h>
62c476c7 38#include <linux/intel-iommu.h>
c8076604 39#include <linux/cpufreq.h>
043405e1
CO
40
41#include <asm/uaccess.h>
d825ed0a 42#include <asm/msr.h>
a5f61300 43#include <asm/desc.h>
0bed3b56 44#include <asm/mtrr.h>
890ca9ae 45#include <asm/mce.h>
043405e1 46
313a3dc7 47#define MAX_IO_MSRS 256
a03490ed
CO
48#define CR0_RESERVED_BITS \
49 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
50 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
51 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
52#define CR4_RESERVED_BITS \
53 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
54 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
55 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
56 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
57
58#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
890ca9ae
HY
59
60#define KVM_MAX_MCE_BANKS 32
61#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
62
50a37eb4
JR
63/* EFER defaults:
64 * - enable syscall per default because its emulated by KVM
65 * - enable LME and LMA per default on 64 bit KVM
66 */
67#ifdef CONFIG_X86_64
68static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
69#else
70static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
71#endif
313a3dc7 72
ba1389b7
AK
73#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
74#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
417bc304 75
674eea0f
AK
76static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
77 struct kvm_cpuid_entry2 __user *entries);
d8017474
AG
78struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
79 u32 function, u32 index);
674eea0f 80
97896d04 81struct kvm_x86_ops *kvm_x86_ops;
5fdbf976 82EXPORT_SYMBOL_GPL(kvm_x86_ops);
97896d04 83
417bc304 84struct kvm_stats_debugfs_item debugfs_entries[] = {
ba1389b7
AK
85 { "pf_fixed", VCPU_STAT(pf_fixed) },
86 { "pf_guest", VCPU_STAT(pf_guest) },
87 { "tlb_flush", VCPU_STAT(tlb_flush) },
88 { "invlpg", VCPU_STAT(invlpg) },
89 { "exits", VCPU_STAT(exits) },
90 { "io_exits", VCPU_STAT(io_exits) },
91 { "mmio_exits", VCPU_STAT(mmio_exits) },
92 { "signal_exits", VCPU_STAT(signal_exits) },
93 { "irq_window", VCPU_STAT(irq_window_exits) },
f08864b4 94 { "nmi_window", VCPU_STAT(nmi_window_exits) },
ba1389b7
AK
95 { "halt_exits", VCPU_STAT(halt_exits) },
96 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
f11c3a8d 97 { "hypercalls", VCPU_STAT(hypercalls) },
ba1389b7
AK
98 { "request_irq", VCPU_STAT(request_irq_exits) },
99 { "irq_exits", VCPU_STAT(irq_exits) },
100 { "host_state_reload", VCPU_STAT(host_state_reload) },
101 { "efer_reload", VCPU_STAT(efer_reload) },
102 { "fpu_reload", VCPU_STAT(fpu_reload) },
103 { "insn_emulation", VCPU_STAT(insn_emulation) },
104 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
fa89a817 105 { "irq_injections", VCPU_STAT(irq_injections) },
c4abb7c9 106 { "nmi_injections", VCPU_STAT(nmi_injections) },
4cee5764
AK
107 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
108 { "mmu_pte_write", VM_STAT(mmu_pte_write) },
109 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
110 { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
111 { "mmu_flooded", VM_STAT(mmu_flooded) },
112 { "mmu_recycled", VM_STAT(mmu_recycled) },
dfc5aa00 113 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
4731d4c7 114 { "mmu_unsync", VM_STAT(mmu_unsync) },
0f74a24c 115 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
05da4558 116 { "largepages", VM_STAT(lpages) },
417bc304
HB
117 { NULL }
118};
119
5fb76f9b
CO
120unsigned long segment_base(u16 selector)
121{
122 struct descriptor_table gdt;
a5f61300 123 struct desc_struct *d;
5fb76f9b
CO
124 unsigned long table_base;
125 unsigned long v;
126
127 if (selector == 0)
128 return 0;
129
130 asm("sgdt %0" : "=m"(gdt));
131 table_base = gdt.base;
132
133 if (selector & 4) { /* from ldt */
134 u16 ldt_selector;
135
136 asm("sldt %0" : "=g"(ldt_selector));
137 table_base = segment_base(ldt_selector);
138 }
a5f61300
AK
139 d = (struct desc_struct *)(table_base + (selector & ~7));
140 v = d->base0 | ((unsigned long)d->base1 << 16) |
141 ((unsigned long)d->base2 << 24);
5fb76f9b 142#ifdef CONFIG_X86_64
a5f61300
AK
143 if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
144 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
5fb76f9b
CO
145#endif
146 return v;
147}
148EXPORT_SYMBOL_GPL(segment_base);
149
6866b83e
CO
150u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
151{
152 if (irqchip_in_kernel(vcpu->kvm))
ad312c7c 153 return vcpu->arch.apic_base;
6866b83e 154 else
ad312c7c 155 return vcpu->arch.apic_base;
6866b83e
CO
156}
157EXPORT_SYMBOL_GPL(kvm_get_apic_base);
158
159void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
160{
161 /* TODO: reserve bits check */
162 if (irqchip_in_kernel(vcpu->kvm))
163 kvm_lapic_set_base(vcpu, data);
164 else
ad312c7c 165 vcpu->arch.apic_base = data;
6866b83e
CO
166}
167EXPORT_SYMBOL_GPL(kvm_set_apic_base);
168
298101da
AK
169void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
170{
ad312c7c
ZX
171 WARN_ON(vcpu->arch.exception.pending);
172 vcpu->arch.exception.pending = true;
173 vcpu->arch.exception.has_error_code = false;
174 vcpu->arch.exception.nr = nr;
298101da
AK
175}
176EXPORT_SYMBOL_GPL(kvm_queue_exception);
177
c3c91fee
AK
178void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
179 u32 error_code)
180{
181 ++vcpu->stat.pf_guest;
d8017474 182
71c4dfaf
JR
183 if (vcpu->arch.exception.pending) {
184 if (vcpu->arch.exception.nr == PF_VECTOR) {
185 printk(KERN_DEBUG "kvm: inject_page_fault:"
186 " double fault 0x%lx\n", addr);
187 vcpu->arch.exception.nr = DF_VECTOR;
188 vcpu->arch.exception.error_code = 0;
189 } else if (vcpu->arch.exception.nr == DF_VECTOR) {
190 /* triple fault -> shutdown */
191 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
192 }
c3c91fee
AK
193 return;
194 }
ad312c7c 195 vcpu->arch.cr2 = addr;
c3c91fee
AK
196 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
197}
198
3419ffc8
SY
199void kvm_inject_nmi(struct kvm_vcpu *vcpu)
200{
201 vcpu->arch.nmi_pending = 1;
202}
203EXPORT_SYMBOL_GPL(kvm_inject_nmi);
204
298101da
AK
205void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
206{
ad312c7c
ZX
207 WARN_ON(vcpu->arch.exception.pending);
208 vcpu->arch.exception.pending = true;
209 vcpu->arch.exception.has_error_code = true;
210 vcpu->arch.exception.nr = nr;
211 vcpu->arch.exception.error_code = error_code;
298101da
AK
212}
213EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
214
215static void __queue_exception(struct kvm_vcpu *vcpu)
216{
ad312c7c
ZX
217 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
218 vcpu->arch.exception.has_error_code,
219 vcpu->arch.exception.error_code);
298101da
AK
220}
221
a03490ed
CO
222/*
223 * Load the pae pdptrs. Return true is they are all valid.
224 */
225int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
226{
227 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
228 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
229 int i;
230 int ret;
ad312c7c 231 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
a03490ed 232
a03490ed
CO
233 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
234 offset * sizeof(u64), sizeof(pdpte));
235 if (ret < 0) {
236 ret = 0;
237 goto out;
238 }
239 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
20c466b5
DE
240 if (is_present_pte(pdpte[i]) &&
241 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
a03490ed
CO
242 ret = 0;
243 goto out;
244 }
245 }
246 ret = 1;
247
ad312c7c 248 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
6de4f3ad
AK
249 __set_bit(VCPU_EXREG_PDPTR,
250 (unsigned long *)&vcpu->arch.regs_avail);
251 __set_bit(VCPU_EXREG_PDPTR,
252 (unsigned long *)&vcpu->arch.regs_dirty);
a03490ed 253out:
a03490ed
CO
254
255 return ret;
256}
cc4b6871 257EXPORT_SYMBOL_GPL(load_pdptrs);
a03490ed 258
d835dfec
AK
259static bool pdptrs_changed(struct kvm_vcpu *vcpu)
260{
ad312c7c 261 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
d835dfec
AK
262 bool changed = true;
263 int r;
264
265 if (is_long_mode(vcpu) || !is_pae(vcpu))
266 return false;
267
6de4f3ad
AK
268 if (!test_bit(VCPU_EXREG_PDPTR,
269 (unsigned long *)&vcpu->arch.regs_avail))
270 return true;
271
ad312c7c 272 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
d835dfec
AK
273 if (r < 0)
274 goto out;
ad312c7c 275 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
d835dfec 276out:
d835dfec
AK
277
278 return changed;
279}
280
2d3ad1f4 281void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
a03490ed
CO
282{
283 if (cr0 & CR0_RESERVED_BITS) {
284 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
ad312c7c 285 cr0, vcpu->arch.cr0);
c1a5d4f9 286 kvm_inject_gp(vcpu, 0);
a03490ed
CO
287 return;
288 }
289
290 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
291 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
c1a5d4f9 292 kvm_inject_gp(vcpu, 0);
a03490ed
CO
293 return;
294 }
295
296 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
297 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
298 "and a clear PE flag\n");
c1a5d4f9 299 kvm_inject_gp(vcpu, 0);
a03490ed
CO
300 return;
301 }
302
303 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
304#ifdef CONFIG_X86_64
ad312c7c 305 if ((vcpu->arch.shadow_efer & EFER_LME)) {
a03490ed
CO
306 int cs_db, cs_l;
307
308 if (!is_pae(vcpu)) {
309 printk(KERN_DEBUG "set_cr0: #GP, start paging "
310 "in long mode while PAE is disabled\n");
c1a5d4f9 311 kvm_inject_gp(vcpu, 0);
a03490ed
CO
312 return;
313 }
314 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
315 if (cs_l) {
316 printk(KERN_DEBUG "set_cr0: #GP, start paging "
317 "in long mode while CS.L == 1\n");
c1a5d4f9 318 kvm_inject_gp(vcpu, 0);
a03490ed
CO
319 return;
320
321 }
322 } else
323#endif
ad312c7c 324 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
a03490ed
CO
325 printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
326 "reserved bits\n");
c1a5d4f9 327 kvm_inject_gp(vcpu, 0);
a03490ed
CO
328 return;
329 }
330
331 }
332
333 kvm_x86_ops->set_cr0(vcpu, cr0);
ad312c7c 334 vcpu->arch.cr0 = cr0;
a03490ed 335
a03490ed 336 kvm_mmu_reset_context(vcpu);
a03490ed
CO
337 return;
338}
2d3ad1f4 339EXPORT_SYMBOL_GPL(kvm_set_cr0);
a03490ed 340
2d3ad1f4 341void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
a03490ed 342{
2d3ad1f4 343 kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
2714d1d3
FEL
344 KVMTRACE_1D(LMSW, vcpu,
345 (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
346 handler);
a03490ed 347}
2d3ad1f4 348EXPORT_SYMBOL_GPL(kvm_lmsw);
a03490ed 349
2d3ad1f4 350void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
a03490ed 351{
a2edf57f
AK
352 unsigned long old_cr4 = vcpu->arch.cr4;
353 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
354
a03490ed
CO
355 if (cr4 & CR4_RESERVED_BITS) {
356 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
c1a5d4f9 357 kvm_inject_gp(vcpu, 0);
a03490ed
CO
358 return;
359 }
360
361 if (is_long_mode(vcpu)) {
362 if (!(cr4 & X86_CR4_PAE)) {
363 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
364 "in long mode\n");
c1a5d4f9 365 kvm_inject_gp(vcpu, 0);
a03490ed
CO
366 return;
367 }
a2edf57f
AK
368 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
369 && ((cr4 ^ old_cr4) & pdptr_bits)
ad312c7c 370 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
a03490ed 371 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
c1a5d4f9 372 kvm_inject_gp(vcpu, 0);
a03490ed
CO
373 return;
374 }
375
376 if (cr4 & X86_CR4_VMXE) {
377 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
c1a5d4f9 378 kvm_inject_gp(vcpu, 0);
a03490ed
CO
379 return;
380 }
381 kvm_x86_ops->set_cr4(vcpu, cr4);
ad312c7c 382 vcpu->arch.cr4 = cr4;
5a41accd 383 vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
a03490ed 384 kvm_mmu_reset_context(vcpu);
a03490ed 385}
2d3ad1f4 386EXPORT_SYMBOL_GPL(kvm_set_cr4);
a03490ed 387
2d3ad1f4 388void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
a03490ed 389{
ad312c7c 390 if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
0ba73cda 391 kvm_mmu_sync_roots(vcpu);
d835dfec
AK
392 kvm_mmu_flush_tlb(vcpu);
393 return;
394 }
395
a03490ed
CO
396 if (is_long_mode(vcpu)) {
397 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
398 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
c1a5d4f9 399 kvm_inject_gp(vcpu, 0);
a03490ed
CO
400 return;
401 }
402 } else {
403 if (is_pae(vcpu)) {
404 if (cr3 & CR3_PAE_RESERVED_BITS) {
405 printk(KERN_DEBUG
406 "set_cr3: #GP, reserved bits\n");
c1a5d4f9 407 kvm_inject_gp(vcpu, 0);
a03490ed
CO
408 return;
409 }
410 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
411 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
412 "reserved bits\n");
c1a5d4f9 413 kvm_inject_gp(vcpu, 0);
a03490ed
CO
414 return;
415 }
416 }
417 /*
418 * We don't check reserved bits in nonpae mode, because
419 * this isn't enforced, and VMware depends on this.
420 */
421 }
422
a03490ed
CO
423 /*
424 * Does the new cr3 value map to physical memory? (Note, we
425 * catch an invalid cr3 even in real-mode, because it would
426 * cause trouble later on when we turn on paging anyway.)
427 *
428 * A real CPU would silently accept an invalid cr3 and would
429 * attempt to use it - with largely undefined (and often hard
430 * to debug) behavior on the guest side.
431 */
432 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
c1a5d4f9 433 kvm_inject_gp(vcpu, 0);
a03490ed 434 else {
ad312c7c
ZX
435 vcpu->arch.cr3 = cr3;
436 vcpu->arch.mmu.new_cr3(vcpu);
a03490ed 437 }
a03490ed 438}
2d3ad1f4 439EXPORT_SYMBOL_GPL(kvm_set_cr3);
a03490ed 440
2d3ad1f4 441void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
a03490ed
CO
442{
443 if (cr8 & CR8_RESERVED_BITS) {
444 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
c1a5d4f9 445 kvm_inject_gp(vcpu, 0);
a03490ed
CO
446 return;
447 }
448 if (irqchip_in_kernel(vcpu->kvm))
449 kvm_lapic_set_tpr(vcpu, cr8);
450 else
ad312c7c 451 vcpu->arch.cr8 = cr8;
a03490ed 452}
2d3ad1f4 453EXPORT_SYMBOL_GPL(kvm_set_cr8);
a03490ed 454
2d3ad1f4 455unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
a03490ed
CO
456{
457 if (irqchip_in_kernel(vcpu->kvm))
458 return kvm_lapic_get_cr8(vcpu);
459 else
ad312c7c 460 return vcpu->arch.cr8;
a03490ed 461}
2d3ad1f4 462EXPORT_SYMBOL_GPL(kvm_get_cr8);
a03490ed 463
d8017474
AG
464static inline u32 bit(int bitno)
465{
466 return 1 << (bitno & 31);
467}
468
043405e1
CO
469/*
470 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
471 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
472 *
473 * This list is modified at module load time to reflect the
474 * capabilities of the host cpu.
475 */
476static u32 msrs_to_save[] = {
477 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
478 MSR_K6_STAR,
479#ifdef CONFIG_X86_64
480 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
481#endif
af24a4e4 482 MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
b286d5d8 483 MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
043405e1
CO
484};
485
486static unsigned num_msrs_to_save;
487
488static u32 emulated_msrs[] = {
489 MSR_IA32_MISC_ENABLE,
490};
491
15c4a640
CO
492static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
493{
f2b4b7dd 494 if (efer & efer_reserved_bits) {
15c4a640
CO
495 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
496 efer);
c1a5d4f9 497 kvm_inject_gp(vcpu, 0);
15c4a640
CO
498 return;
499 }
500
501 if (is_paging(vcpu)
ad312c7c 502 && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
15c4a640 503 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
c1a5d4f9 504 kvm_inject_gp(vcpu, 0);
15c4a640
CO
505 return;
506 }
507
1b2fd70c
AG
508 if (efer & EFER_FFXSR) {
509 struct kvm_cpuid_entry2 *feat;
510
511 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
512 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
513 printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
514 kvm_inject_gp(vcpu, 0);
515 return;
516 }
517 }
518
d8017474
AG
519 if (efer & EFER_SVME) {
520 struct kvm_cpuid_entry2 *feat;
521
522 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
523 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
524 printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
525 kvm_inject_gp(vcpu, 0);
526 return;
527 }
528 }
529
15c4a640
CO
530 kvm_x86_ops->set_efer(vcpu, efer);
531
532 efer &= ~EFER_LMA;
ad312c7c 533 efer |= vcpu->arch.shadow_efer & EFER_LMA;
15c4a640 534
ad312c7c 535 vcpu->arch.shadow_efer = efer;
9645bb56
AK
536
537 vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
538 kvm_mmu_reset_context(vcpu);
15c4a640
CO
539}
540
f2b4b7dd
JR
541void kvm_enable_efer_bits(u64 mask)
542{
543 efer_reserved_bits &= ~mask;
544}
545EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
546
547
15c4a640
CO
548/*
549 * Writes msr value into into the appropriate "register".
550 * Returns 0 on success, non-0 otherwise.
551 * Assumes vcpu_load() was already called.
552 */
553int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
554{
555 return kvm_x86_ops->set_msr(vcpu, msr_index, data);
556}
557
313a3dc7
CO
558/*
559 * Adapt set_msr() to msr_io()'s calling convention
560 */
561static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
562{
563 return kvm_set_msr(vcpu, index, *data);
564}
565
18068523
GOC
566static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
567{
568 static int version;
50d0a0f9
GH
569 struct pvclock_wall_clock wc;
570 struct timespec now, sys, boot;
18068523
GOC
571
572 if (!wall_clock)
573 return;
574
575 version++;
576
18068523
GOC
577 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
578
50d0a0f9
GH
579 /*
580 * The guest calculates current wall clock time by adding
581 * system time (updated by kvm_write_guest_time below) to the
582 * wall clock specified here. guest system time equals host
583 * system time for us, thus we must fill in host boot time here.
584 */
585 now = current_kernel_time();
586 ktime_get_ts(&sys);
587 boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
588
589 wc.sec = boot.tv_sec;
590 wc.nsec = boot.tv_nsec;
591 wc.version = version;
18068523
GOC
592
593 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
594
595 version++;
596 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
18068523
GOC
597}
598
50d0a0f9
GH
599static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
600{
601 uint32_t quotient, remainder;
602
603 /* Don't try to replace with do_div(), this one calculates
604 * "(dividend << 32) / divisor" */
605 __asm__ ( "divl %4"
606 : "=a" (quotient), "=d" (remainder)
607 : "0" (0), "1" (dividend), "r" (divisor) );
608 return quotient;
609}
610
611static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
612{
613 uint64_t nsecs = 1000000000LL;
614 int32_t shift = 0;
615 uint64_t tps64;
616 uint32_t tps32;
617
618 tps64 = tsc_khz * 1000LL;
619 while (tps64 > nsecs*2) {
620 tps64 >>= 1;
621 shift--;
622 }
623
624 tps32 = (uint32_t)tps64;
625 while (tps32 <= (uint32_t)nsecs) {
626 tps32 <<= 1;
627 shift++;
628 }
629
630 hv_clock->tsc_shift = shift;
631 hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
632
633 pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
80a914dc 634 __func__, tsc_khz, hv_clock->tsc_shift,
50d0a0f9
GH
635 hv_clock->tsc_to_system_mul);
636}
637
c8076604
GH
638static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
639
18068523
GOC
640static void kvm_write_guest_time(struct kvm_vcpu *v)
641{
642 struct timespec ts;
643 unsigned long flags;
644 struct kvm_vcpu_arch *vcpu = &v->arch;
645 void *shared_kaddr;
463656c0 646 unsigned long this_tsc_khz;
18068523
GOC
647
648 if ((!vcpu->time_page))
649 return;
650
463656c0
AK
651 this_tsc_khz = get_cpu_var(cpu_tsc_khz);
652 if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
653 kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
654 vcpu->hv_clock_tsc_khz = this_tsc_khz;
50d0a0f9 655 }
463656c0 656 put_cpu_var(cpu_tsc_khz);
50d0a0f9 657
18068523
GOC
658 /* Keep irq disabled to prevent changes to the clock */
659 local_irq_save(flags);
af24a4e4 660 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
18068523
GOC
661 ktime_get_ts(&ts);
662 local_irq_restore(flags);
663
664 /* With all the info we got, fill in the values */
665
666 vcpu->hv_clock.system_time = ts.tv_nsec +
667 (NSEC_PER_SEC * (u64)ts.tv_sec);
668 /*
669 * The interface expects us to write an even number signaling that the
670 * update is finished. Since the guest won't see the intermediate
50d0a0f9 671 * state, we just increase by 2 at the end.
18068523 672 */
50d0a0f9 673 vcpu->hv_clock.version += 2;
18068523
GOC
674
675 shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
676
677 memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
50d0a0f9 678 sizeof(vcpu->hv_clock));
18068523
GOC
679
680 kunmap_atomic(shared_kaddr, KM_USER0);
681
682 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
683}
684
c8076604
GH
685static int kvm_request_guest_time_update(struct kvm_vcpu *v)
686{
687 struct kvm_vcpu_arch *vcpu = &v->arch;
688
689 if (!vcpu->time_page)
690 return 0;
691 set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
692 return 1;
693}
694
9ba075a6
AK
695static bool msr_mtrr_valid(unsigned msr)
696{
697 switch (msr) {
698 case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
699 case MSR_MTRRfix64K_00000:
700 case MSR_MTRRfix16K_80000:
701 case MSR_MTRRfix16K_A0000:
702 case MSR_MTRRfix4K_C0000:
703 case MSR_MTRRfix4K_C8000:
704 case MSR_MTRRfix4K_D0000:
705 case MSR_MTRRfix4K_D8000:
706 case MSR_MTRRfix4K_E0000:
707 case MSR_MTRRfix4K_E8000:
708 case MSR_MTRRfix4K_F0000:
709 case MSR_MTRRfix4K_F8000:
710 case MSR_MTRRdefType:
711 case MSR_IA32_CR_PAT:
712 return true;
713 case 0x2f8:
714 return true;
715 }
716 return false;
717}
718
d6289b93
MT
719static bool valid_pat_type(unsigned t)
720{
721 return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
722}
723
724static bool valid_mtrr_type(unsigned t)
725{
726 return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
727}
728
729static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
730{
731 int i;
732
733 if (!msr_mtrr_valid(msr))
734 return false;
735
736 if (msr == MSR_IA32_CR_PAT) {
737 for (i = 0; i < 8; i++)
738 if (!valid_pat_type((data >> (i * 8)) & 0xff))
739 return false;
740 return true;
741 } else if (msr == MSR_MTRRdefType) {
742 if (data & ~0xcff)
743 return false;
744 return valid_mtrr_type(data & 0xff);
745 } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
746 for (i = 0; i < 8 ; i++)
747 if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
748 return false;
749 return true;
750 }
751
752 /* variable MTRRs */
753 return valid_mtrr_type(data & 0xff);
754}
755
9ba075a6
AK
756static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
757{
0bed3b56
SY
758 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
759
d6289b93 760 if (!mtrr_valid(vcpu, msr, data))
9ba075a6
AK
761 return 1;
762
0bed3b56
SY
763 if (msr == MSR_MTRRdefType) {
764 vcpu->arch.mtrr_state.def_type = data;
765 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
766 } else if (msr == MSR_MTRRfix64K_00000)
767 p[0] = data;
768 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
769 p[1 + msr - MSR_MTRRfix16K_80000] = data;
770 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
771 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
772 else if (msr == MSR_IA32_CR_PAT)
773 vcpu->arch.pat = data;
774 else { /* Variable MTRRs */
775 int idx, is_mtrr_mask;
776 u64 *pt;
777
778 idx = (msr - 0x200) / 2;
779 is_mtrr_mask = msr - 0x200 - 2 * idx;
780 if (!is_mtrr_mask)
781 pt =
782 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
783 else
784 pt =
785 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
786 *pt = data;
787 }
788
789 kvm_mmu_reset_context(vcpu);
9ba075a6
AK
790 return 0;
791}
15c4a640 792
890ca9ae 793static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
15c4a640 794{
890ca9ae
HY
795 u64 mcg_cap = vcpu->arch.mcg_cap;
796 unsigned bank_num = mcg_cap & 0xff;
797
15c4a640 798 switch (msr) {
15c4a640 799 case MSR_IA32_MCG_STATUS:
890ca9ae 800 vcpu->arch.mcg_status = data;
15c4a640 801 break;
c7ac679c 802 case MSR_IA32_MCG_CTL:
890ca9ae
HY
803 if (!(mcg_cap & MCG_CTL_P))
804 return 1;
805 if (data != 0 && data != ~(u64)0)
806 return -1;
807 vcpu->arch.mcg_ctl = data;
808 break;
809 default:
810 if (msr >= MSR_IA32_MC0_CTL &&
811 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
812 u32 offset = msr - MSR_IA32_MC0_CTL;
813 /* only 0 or all 1s can be written to IA32_MCi_CTL */
814 if ((offset & 0x3) == 0 &&
815 data != 0 && data != ~(u64)0)
816 return -1;
817 vcpu->arch.mce_banks[offset] = data;
818 break;
819 }
820 return 1;
821 }
822 return 0;
823}
824
825int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
826{
827 switch (msr) {
828 case MSR_EFER:
829 set_efer(vcpu, data);
c7ac679c 830 break;
b5e2fec0
AG
831 case MSR_IA32_DEBUGCTLMSR:
832 if (!data) {
833 /* We support the non-activated case already */
834 break;
835 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
836 /* Values other than LBR and BTF are vendor-specific,
837 thus reserved and should throw a #GP */
838 return 1;
839 }
840 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
841 __func__, data);
842 break;
15c4a640
CO
843 case MSR_IA32_UCODE_REV:
844 case MSR_IA32_UCODE_WRITE:
61a6bd67 845 case MSR_VM_HSAVE_PA:
15c4a640 846 break;
9ba075a6
AK
847 case 0x200 ... 0x2ff:
848 return set_msr_mtrr(vcpu, msr, data);
15c4a640
CO
849 case MSR_IA32_APICBASE:
850 kvm_set_apic_base(vcpu, data);
851 break;
852 case MSR_IA32_MISC_ENABLE:
ad312c7c 853 vcpu->arch.ia32_misc_enable_msr = data;
15c4a640 854 break;
18068523
GOC
855 case MSR_KVM_WALL_CLOCK:
856 vcpu->kvm->arch.wall_clock = data;
857 kvm_write_wall_clock(vcpu->kvm, data);
858 break;
859 case MSR_KVM_SYSTEM_TIME: {
860 if (vcpu->arch.time_page) {
861 kvm_release_page_dirty(vcpu->arch.time_page);
862 vcpu->arch.time_page = NULL;
863 }
864
865 vcpu->arch.time = data;
866
867 /* we verify if the enable bit is set... */
868 if (!(data & 1))
869 break;
870
871 /* ...but clean it before doing the actual write */
872 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
873
18068523
GOC
874 vcpu->arch.time_page =
875 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
18068523
GOC
876
877 if (is_error_page(vcpu->arch.time_page)) {
878 kvm_release_page_clean(vcpu->arch.time_page);
879 vcpu->arch.time_page = NULL;
880 }
881
c8076604 882 kvm_request_guest_time_update(vcpu);
18068523
GOC
883 break;
884 }
890ca9ae
HY
885 case MSR_IA32_MCG_CTL:
886 case MSR_IA32_MCG_STATUS:
887 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
888 return set_msr_mce(vcpu, msr, data);
15c4a640 889 default:
565f1fbd 890 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
15c4a640
CO
891 return 1;
892 }
893 return 0;
894}
895EXPORT_SYMBOL_GPL(kvm_set_msr_common);
896
897
898/*
899 * Reads an msr value (of 'msr_index') into 'pdata'.
900 * Returns 0 on success, non-0 otherwise.
901 * Assumes vcpu_load() was already called.
902 */
903int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
904{
905 return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
906}
907
9ba075a6
AK
908static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
909{
0bed3b56
SY
910 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
911
9ba075a6
AK
912 if (!msr_mtrr_valid(msr))
913 return 1;
914
0bed3b56
SY
915 if (msr == MSR_MTRRdefType)
916 *pdata = vcpu->arch.mtrr_state.def_type +
917 (vcpu->arch.mtrr_state.enabled << 10);
918 else if (msr == MSR_MTRRfix64K_00000)
919 *pdata = p[0];
920 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
921 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
922 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
923 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
924 else if (msr == MSR_IA32_CR_PAT)
925 *pdata = vcpu->arch.pat;
926 else { /* Variable MTRRs */
927 int idx, is_mtrr_mask;
928 u64 *pt;
929
930 idx = (msr - 0x200) / 2;
931 is_mtrr_mask = msr - 0x200 - 2 * idx;
932 if (!is_mtrr_mask)
933 pt =
934 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
935 else
936 pt =
937 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
938 *pdata = *pt;
939 }
940
9ba075a6
AK
941 return 0;
942}
943
890ca9ae 944static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
15c4a640
CO
945{
946 u64 data;
890ca9ae
HY
947 u64 mcg_cap = vcpu->arch.mcg_cap;
948 unsigned bank_num = mcg_cap & 0xff;
15c4a640
CO
949
950 switch (msr) {
15c4a640
CO
951 case MSR_IA32_P5_MC_ADDR:
952 case MSR_IA32_P5_MC_TYPE:
890ca9ae
HY
953 data = 0;
954 break;
15c4a640 955 case MSR_IA32_MCG_CAP:
890ca9ae
HY
956 data = vcpu->arch.mcg_cap;
957 break;
c7ac679c 958 case MSR_IA32_MCG_CTL:
890ca9ae
HY
959 if (!(mcg_cap & MCG_CTL_P))
960 return 1;
961 data = vcpu->arch.mcg_ctl;
962 break;
963 case MSR_IA32_MCG_STATUS:
964 data = vcpu->arch.mcg_status;
965 break;
966 default:
967 if (msr >= MSR_IA32_MC0_CTL &&
968 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
969 u32 offset = msr - MSR_IA32_MC0_CTL;
970 data = vcpu->arch.mce_banks[offset];
971 break;
972 }
973 return 1;
974 }
975 *pdata = data;
976 return 0;
977}
978
979int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
980{
981 u64 data;
982
983 switch (msr) {
890ca9ae 984 case MSR_IA32_PLATFORM_ID:
15c4a640 985 case MSR_IA32_UCODE_REV:
15c4a640 986 case MSR_IA32_EBL_CR_POWERON:
b5e2fec0
AG
987 case MSR_IA32_DEBUGCTLMSR:
988 case MSR_IA32_LASTBRANCHFROMIP:
989 case MSR_IA32_LASTBRANCHTOIP:
990 case MSR_IA32_LASTINTFROMIP:
991 case MSR_IA32_LASTINTTOIP:
60af2ecd
JSR
992 case MSR_K8_SYSCFG:
993 case MSR_K7_HWCR:
61a6bd67 994 case MSR_VM_HSAVE_PA:
7fe29e0f
AS
995 case MSR_P6_EVNTSEL0:
996 case MSR_P6_EVNTSEL1:
9e699624 997 case MSR_K7_EVNTSEL0:
15c4a640
CO
998 data = 0;
999 break;
9ba075a6
AK
1000 case MSR_MTRRcap:
1001 data = 0x500 | KVM_NR_VAR_MTRR;
1002 break;
1003 case 0x200 ... 0x2ff:
1004 return get_msr_mtrr(vcpu, msr, pdata);
15c4a640
CO
1005 case 0xcd: /* fsb frequency */
1006 data = 3;
1007 break;
1008 case MSR_IA32_APICBASE:
1009 data = kvm_get_apic_base(vcpu);
1010 break;
1011 case MSR_IA32_MISC_ENABLE:
ad312c7c 1012 data = vcpu->arch.ia32_misc_enable_msr;
15c4a640 1013 break;
847f0ad8
AG
1014 case MSR_IA32_PERF_STATUS:
1015 /* TSC increment by tick */
1016 data = 1000ULL;
1017 /* CPU multiplier */
1018 data |= (((uint64_t)4ULL) << 40);
1019 break;
15c4a640 1020 case MSR_EFER:
ad312c7c 1021 data = vcpu->arch.shadow_efer;
15c4a640 1022 break;
18068523
GOC
1023 case MSR_KVM_WALL_CLOCK:
1024 data = vcpu->kvm->arch.wall_clock;
1025 break;
1026 case MSR_KVM_SYSTEM_TIME:
1027 data = vcpu->arch.time;
1028 break;
890ca9ae
HY
1029 case MSR_IA32_P5_MC_ADDR:
1030 case MSR_IA32_P5_MC_TYPE:
1031 case MSR_IA32_MCG_CAP:
1032 case MSR_IA32_MCG_CTL:
1033 case MSR_IA32_MCG_STATUS:
1034 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1035 return get_msr_mce(vcpu, msr, pdata);
15c4a640
CO
1036 default:
1037 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1038 return 1;
1039 }
1040 *pdata = data;
1041 return 0;
1042}
1043EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1044
313a3dc7
CO
1045/*
1046 * Read or write a bunch of msrs. All parameters are kernel addresses.
1047 *
1048 * @return number of msrs set successfully.
1049 */
1050static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1051 struct kvm_msr_entry *entries,
1052 int (*do_msr)(struct kvm_vcpu *vcpu,
1053 unsigned index, u64 *data))
1054{
1055 int i;
1056
1057 vcpu_load(vcpu);
1058
3200f405 1059 down_read(&vcpu->kvm->slots_lock);
313a3dc7
CO
1060 for (i = 0; i < msrs->nmsrs; ++i)
1061 if (do_msr(vcpu, entries[i].index, &entries[i].data))
1062 break;
3200f405 1063 up_read(&vcpu->kvm->slots_lock);
313a3dc7
CO
1064
1065 vcpu_put(vcpu);
1066
1067 return i;
1068}
1069
1070/*
1071 * Read or write a bunch of msrs. Parameters are user addresses.
1072 *
1073 * @return number of msrs set successfully.
1074 */
1075static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1076 int (*do_msr)(struct kvm_vcpu *vcpu,
1077 unsigned index, u64 *data),
1078 int writeback)
1079{
1080 struct kvm_msrs msrs;
1081 struct kvm_msr_entry *entries;
1082 int r, n;
1083 unsigned size;
1084
1085 r = -EFAULT;
1086 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1087 goto out;
1088
1089 r = -E2BIG;
1090 if (msrs.nmsrs >= MAX_IO_MSRS)
1091 goto out;
1092
1093 r = -ENOMEM;
1094 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
1095 entries = vmalloc(size);
1096 if (!entries)
1097 goto out;
1098
1099 r = -EFAULT;
1100 if (copy_from_user(entries, user_msrs->entries, size))
1101 goto out_free;
1102
1103 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
1104 if (r < 0)
1105 goto out_free;
1106
1107 r = -EFAULT;
1108 if (writeback && copy_to_user(user_msrs->entries, entries, size))
1109 goto out_free;
1110
1111 r = n;
1112
1113out_free:
1114 vfree(entries);
1115out:
1116 return r;
1117}
1118
018d00d2
ZX
1119int kvm_dev_ioctl_check_extension(long ext)
1120{
1121 int r;
1122
1123 switch (ext) {
1124 case KVM_CAP_IRQCHIP:
1125 case KVM_CAP_HLT:
1126 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
018d00d2 1127 case KVM_CAP_SET_TSS_ADDR:
07716717 1128 case KVM_CAP_EXT_CPUID:
c8076604 1129 case KVM_CAP_CLOCKSOURCE:
7837699f 1130 case KVM_CAP_PIT:
a28e4f5a 1131 case KVM_CAP_NOP_IO_DELAY:
62d9f0db 1132 case KVM_CAP_MP_STATE:
ed848624 1133 case KVM_CAP_SYNC_MMU:
52d939a0 1134 case KVM_CAP_REINJECT_CONTROL:
4925663a 1135 case KVM_CAP_IRQ_INJECT_STATUS:
e56d532f 1136 case KVM_CAP_ASSIGN_DEV_IRQ:
721eecbf 1137 case KVM_CAP_IRQFD:
c5ff41ce 1138 case KVM_CAP_PIT2:
018d00d2
ZX
1139 r = 1;
1140 break;
542472b5
LV
1141 case KVM_CAP_COALESCED_MMIO:
1142 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1143 break;
774ead3a
AK
1144 case KVM_CAP_VAPIC:
1145 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1146 break;
f725230a
AK
1147 case KVM_CAP_NR_VCPUS:
1148 r = KVM_MAX_VCPUS;
1149 break;
a988b910
AK
1150 case KVM_CAP_NR_MEMSLOTS:
1151 r = KVM_MEMORY_SLOTS;
1152 break;
2f333bcb
MT
1153 case KVM_CAP_PV_MMU:
1154 r = !tdp_enabled;
1155 break;
62c476c7 1156 case KVM_CAP_IOMMU:
19de40a8 1157 r = iommu_found();
62c476c7 1158 break;
890ca9ae
HY
1159 case KVM_CAP_MCE:
1160 r = KVM_MAX_MCE_BANKS;
1161 break;
018d00d2
ZX
1162 default:
1163 r = 0;
1164 break;
1165 }
1166 return r;
1167
1168}
1169
043405e1
CO
1170long kvm_arch_dev_ioctl(struct file *filp,
1171 unsigned int ioctl, unsigned long arg)
1172{
1173 void __user *argp = (void __user *)arg;
1174 long r;
1175
1176 switch (ioctl) {
1177 case KVM_GET_MSR_INDEX_LIST: {
1178 struct kvm_msr_list __user *user_msr_list = argp;
1179 struct kvm_msr_list msr_list;
1180 unsigned n;
1181
1182 r = -EFAULT;
1183 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1184 goto out;
1185 n = msr_list.nmsrs;
1186 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1187 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1188 goto out;
1189 r = -E2BIG;
e125e7b6 1190 if (n < msr_list.nmsrs)
043405e1
CO
1191 goto out;
1192 r = -EFAULT;
1193 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1194 num_msrs_to_save * sizeof(u32)))
1195 goto out;
e125e7b6 1196 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
043405e1
CO
1197 &emulated_msrs,
1198 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1199 goto out;
1200 r = 0;
1201 break;
1202 }
674eea0f
AK
1203 case KVM_GET_SUPPORTED_CPUID: {
1204 struct kvm_cpuid2 __user *cpuid_arg = argp;
1205 struct kvm_cpuid2 cpuid;
1206
1207 r = -EFAULT;
1208 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1209 goto out;
1210 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
19355475 1211 cpuid_arg->entries);
674eea0f
AK
1212 if (r)
1213 goto out;
1214
1215 r = -EFAULT;
1216 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1217 goto out;
1218 r = 0;
1219 break;
1220 }
890ca9ae
HY
1221 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1222 u64 mce_cap;
1223
1224 mce_cap = KVM_MCE_CAP_SUPPORTED;
1225 r = -EFAULT;
1226 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1227 goto out;
1228 r = 0;
1229 break;
1230 }
043405e1
CO
1231 default:
1232 r = -EINVAL;
1233 }
1234out:
1235 return r;
1236}
1237
313a3dc7
CO
1238void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1239{
1240 kvm_x86_ops->vcpu_load(vcpu, cpu);
c8076604 1241 kvm_request_guest_time_update(vcpu);
313a3dc7
CO
1242}
1243
1244void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1245{
1246 kvm_x86_ops->vcpu_put(vcpu);
9327fd11 1247 kvm_put_guest_fpu(vcpu);
313a3dc7
CO
1248}
1249
07716717 1250static int is_efer_nx(void)
313a3dc7 1251{
e286e86e 1252 unsigned long long efer = 0;
313a3dc7 1253
e286e86e 1254 rdmsrl_safe(MSR_EFER, &efer);
07716717
DK
1255 return efer & EFER_NX;
1256}
1257
1258static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
1259{
1260 int i;
1261 struct kvm_cpuid_entry2 *e, *entry;
1262
313a3dc7 1263 entry = NULL;
ad312c7c
ZX
1264 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
1265 e = &vcpu->arch.cpuid_entries[i];
313a3dc7
CO
1266 if (e->function == 0x80000001) {
1267 entry = e;
1268 break;
1269 }
1270 }
07716717 1271 if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
313a3dc7
CO
1272 entry->edx &= ~(1 << 20);
1273 printk(KERN_INFO "kvm: guest NX capability removed\n");
1274 }
1275}
1276
07716717 1277/* when an old userspace process fills a new kernel module */
313a3dc7
CO
1278static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1279 struct kvm_cpuid *cpuid,
1280 struct kvm_cpuid_entry __user *entries)
07716717
DK
1281{
1282 int r, i;
1283 struct kvm_cpuid_entry *cpuid_entries;
1284
1285 r = -E2BIG;
1286 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1287 goto out;
1288 r = -ENOMEM;
1289 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1290 if (!cpuid_entries)
1291 goto out;
1292 r = -EFAULT;
1293 if (copy_from_user(cpuid_entries, entries,
1294 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1295 goto out_free;
1296 for (i = 0; i < cpuid->nent; i++) {
ad312c7c
ZX
1297 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1298 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1299 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1300 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1301 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1302 vcpu->arch.cpuid_entries[i].index = 0;
1303 vcpu->arch.cpuid_entries[i].flags = 0;
1304 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1305 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1306 vcpu->arch.cpuid_entries[i].padding[2] = 0;
1307 }
1308 vcpu->arch.cpuid_nent = cpuid->nent;
07716717
DK
1309 cpuid_fix_nx_cap(vcpu);
1310 r = 0;
1311
1312out_free:
1313 vfree(cpuid_entries);
1314out:
1315 return r;
1316}
1317
1318static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
19355475
AS
1319 struct kvm_cpuid2 *cpuid,
1320 struct kvm_cpuid_entry2 __user *entries)
313a3dc7
CO
1321{
1322 int r;
1323
1324 r = -E2BIG;
1325 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1326 goto out;
1327 r = -EFAULT;
ad312c7c 1328 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
07716717 1329 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
313a3dc7 1330 goto out;
ad312c7c 1331 vcpu->arch.cpuid_nent = cpuid->nent;
313a3dc7
CO
1332 return 0;
1333
1334out:
1335 return r;
1336}
1337
07716717 1338static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
19355475
AS
1339 struct kvm_cpuid2 *cpuid,
1340 struct kvm_cpuid_entry2 __user *entries)
07716717
DK
1341{
1342 int r;
1343
1344 r = -E2BIG;
ad312c7c 1345 if (cpuid->nent < vcpu->arch.cpuid_nent)
07716717
DK
1346 goto out;
1347 r = -EFAULT;
ad312c7c 1348 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
19355475 1349 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
07716717
DK
1350 goto out;
1351 return 0;
1352
1353out:
ad312c7c 1354 cpuid->nent = vcpu->arch.cpuid_nent;
07716717
DK
1355 return r;
1356}
1357
07716717 1358static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
19355475 1359 u32 index)
07716717
DK
1360{
1361 entry->function = function;
1362 entry->index = index;
1363 cpuid_count(entry->function, entry->index,
19355475 1364 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
07716717
DK
1365 entry->flags = 0;
1366}
1367
7faa4ee1
AK
1368#define F(x) bit(X86_FEATURE_##x)
1369
07716717
DK
1370static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1371 u32 index, int *nent, int maxnent)
1372{
7faa4ee1 1373 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
07716717 1374#ifdef CONFIG_X86_64
7faa4ee1
AK
1375 unsigned f_lm = F(LM);
1376#else
1377 unsigned f_lm = 0;
07716717 1378#endif
7faa4ee1
AK
1379
1380 /* cpuid 1.edx */
1381 const u32 kvm_supported_word0_x86_features =
1382 F(FPU) | F(VME) | F(DE) | F(PSE) |
1383 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1384 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1385 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1386 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1387 0 /* Reserved, DS, ACPI */ | F(MMX) |
1388 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1389 0 /* HTT, TM, Reserved, PBE */;
1390 /* cpuid 0x80000001.edx */
1391 const u32 kvm_supported_word1_x86_features =
1392 F(FPU) | F(VME) | F(DE) | F(PSE) |
1393 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1394 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1395 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1396 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1397 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
1398 F(FXSR) | F(FXSR_OPT) | 0 /* GBPAGES */ | 0 /* RDTSCP */ |
1399 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1400 /* cpuid 1.ecx */
1401 const u32 kvm_supported_word4_x86_features =
d149c731
AK
1402 F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
1403 0 /* DS-CPL, VMX, SMX, EST */ |
1404 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1405 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1406 0 /* Reserved, DCA */ | F(XMM4_1) |
1407 F(XMM4_2) | 0 /* x2APIC */ | F(MOVBE) | F(POPCNT) |
1408 0 /* Reserved, XSAVE, OSXSAVE */;
7faa4ee1 1409 /* cpuid 0x80000001.ecx */
07716717 1410 const u32 kvm_supported_word6_x86_features =
7faa4ee1
AK
1411 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
1412 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1413 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
1414 0 /* SKINIT */ | 0 /* WDT */;
07716717 1415
19355475 1416 /* all calls to cpuid_count() should be made on the same cpu */
07716717
DK
1417 get_cpu();
1418 do_cpuid_1_ent(entry, function, index);
1419 ++*nent;
1420
1421 switch (function) {
1422 case 0:
1423 entry->eax = min(entry->eax, (u32)0xb);
1424 break;
1425 case 1:
1426 entry->edx &= kvm_supported_word0_x86_features;
7faa4ee1 1427 entry->ecx &= kvm_supported_word4_x86_features;
07716717
DK
1428 break;
1429 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1430 * may return different values. This forces us to get_cpu() before
1431 * issuing the first command, and also to emulate this annoying behavior
1432 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
1433 case 2: {
1434 int t, times = entry->eax & 0xff;
1435
1436 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
0fdf8e59 1437 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
07716717
DK
1438 for (t = 1; t < times && *nent < maxnent; ++t) {
1439 do_cpuid_1_ent(&entry[t], function, 0);
1440 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1441 ++*nent;
1442 }
1443 break;
1444 }
1445 /* function 4 and 0xb have additional index. */
1446 case 4: {
14af3f3c 1447 int i, cache_type;
07716717
DK
1448
1449 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1450 /* read more entries until cache_type is zero */
14af3f3c
HH
1451 for (i = 1; *nent < maxnent; ++i) {
1452 cache_type = entry[i - 1].eax & 0x1f;
07716717
DK
1453 if (!cache_type)
1454 break;
14af3f3c
HH
1455 do_cpuid_1_ent(&entry[i], function, i);
1456 entry[i].flags |=
07716717
DK
1457 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1458 ++*nent;
1459 }
1460 break;
1461 }
1462 case 0xb: {
14af3f3c 1463 int i, level_type;
07716717
DK
1464
1465 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1466 /* read more entries until level_type is zero */
14af3f3c 1467 for (i = 1; *nent < maxnent; ++i) {
0853d2c1 1468 level_type = entry[i - 1].ecx & 0xff00;
07716717
DK
1469 if (!level_type)
1470 break;
14af3f3c
HH
1471 do_cpuid_1_ent(&entry[i], function, i);
1472 entry[i].flags |=
07716717
DK
1473 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1474 ++*nent;
1475 }
1476 break;
1477 }
1478 case 0x80000000:
1479 entry->eax = min(entry->eax, 0x8000001a);
1480 break;
1481 case 0x80000001:
1482 entry->edx &= kvm_supported_word1_x86_features;
1483 entry->ecx &= kvm_supported_word6_x86_features;
1484 break;
1485 }
1486 put_cpu();
1487}
1488
7faa4ee1
AK
1489#undef F
1490
674eea0f 1491static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
19355475 1492 struct kvm_cpuid_entry2 __user *entries)
07716717
DK
1493{
1494 struct kvm_cpuid_entry2 *cpuid_entries;
1495 int limit, nent = 0, r = -E2BIG;
1496 u32 func;
1497
1498 if (cpuid->nent < 1)
1499 goto out;
1500 r = -ENOMEM;
1501 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
1502 if (!cpuid_entries)
1503 goto out;
1504
1505 do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
1506 limit = cpuid_entries[0].eax;
1507 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
1508 do_cpuid_ent(&cpuid_entries[nent], func, 0,
19355475 1509 &nent, cpuid->nent);
07716717
DK
1510 r = -E2BIG;
1511 if (nent >= cpuid->nent)
1512 goto out_free;
1513
1514 do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
1515 limit = cpuid_entries[nent - 1].eax;
1516 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1517 do_cpuid_ent(&cpuid_entries[nent], func, 0,
19355475 1518 &nent, cpuid->nent);
cb007648
MM
1519 r = -E2BIG;
1520 if (nent >= cpuid->nent)
1521 goto out_free;
1522
07716717
DK
1523 r = -EFAULT;
1524 if (copy_to_user(entries, cpuid_entries,
19355475 1525 nent * sizeof(struct kvm_cpuid_entry2)))
07716717
DK
1526 goto out_free;
1527 cpuid->nent = nent;
1528 r = 0;
1529
1530out_free:
1531 vfree(cpuid_entries);
1532out:
1533 return r;
1534}
1535
313a3dc7
CO
1536static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1537 struct kvm_lapic_state *s)
1538{
1539 vcpu_load(vcpu);
ad312c7c 1540 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
313a3dc7
CO
1541 vcpu_put(vcpu);
1542
1543 return 0;
1544}
1545
1546static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1547 struct kvm_lapic_state *s)
1548{
1549 vcpu_load(vcpu);
ad312c7c 1550 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
313a3dc7
CO
1551 kvm_apic_post_state_restore(vcpu);
1552 vcpu_put(vcpu);
1553
1554 return 0;
1555}
1556
f77bc6a4
ZX
1557static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1558 struct kvm_interrupt *irq)
1559{
1560 if (irq->irq < 0 || irq->irq >= 256)
1561 return -EINVAL;
1562 if (irqchip_in_kernel(vcpu->kvm))
1563 return -ENXIO;
1564 vcpu_load(vcpu);
1565
66fd3f7f 1566 kvm_queue_interrupt(vcpu, irq->irq, false);
f77bc6a4
ZX
1567
1568 vcpu_put(vcpu);
1569
1570 return 0;
1571}
1572
c4abb7c9
JK
1573static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
1574{
1575 vcpu_load(vcpu);
1576 kvm_inject_nmi(vcpu);
1577 vcpu_put(vcpu);
1578
1579 return 0;
1580}
1581
b209749f
AK
1582static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
1583 struct kvm_tpr_access_ctl *tac)
1584{
1585 if (tac->flags)
1586 return -EINVAL;
1587 vcpu->arch.tpr_access_reporting = !!tac->enabled;
1588 return 0;
1589}
1590
890ca9ae
HY
1591static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1592 u64 mcg_cap)
1593{
1594 int r;
1595 unsigned bank_num = mcg_cap & 0xff, bank;
1596
1597 r = -EINVAL;
1598 if (!bank_num)
1599 goto out;
1600 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
1601 goto out;
1602 r = 0;
1603 vcpu->arch.mcg_cap = mcg_cap;
1604 /* Init IA32_MCG_CTL to all 1s */
1605 if (mcg_cap & MCG_CTL_P)
1606 vcpu->arch.mcg_ctl = ~(u64)0;
1607 /* Init IA32_MCi_CTL to all 1s */
1608 for (bank = 0; bank < bank_num; bank++)
1609 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
1610out:
1611 return r;
1612}
1613
1614static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
1615 struct kvm_x86_mce *mce)
1616{
1617 u64 mcg_cap = vcpu->arch.mcg_cap;
1618 unsigned bank_num = mcg_cap & 0xff;
1619 u64 *banks = vcpu->arch.mce_banks;
1620
1621 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
1622 return -EINVAL;
1623 /*
1624 * if IA32_MCG_CTL is not all 1s, the uncorrected error
1625 * reporting is disabled
1626 */
1627 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
1628 vcpu->arch.mcg_ctl != ~(u64)0)
1629 return 0;
1630 banks += 4 * mce->bank;
1631 /*
1632 * if IA32_MCi_CTL is not all 1s, the uncorrected error
1633 * reporting is disabled for the bank
1634 */
1635 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
1636 return 0;
1637 if (mce->status & MCI_STATUS_UC) {
1638 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
1639 !(vcpu->arch.cr4 & X86_CR4_MCE)) {
1640 printk(KERN_DEBUG "kvm: set_mce: "
1641 "injects mce exception while "
1642 "previous one is in progress!\n");
1643 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
1644 return 0;
1645 }
1646 if (banks[1] & MCI_STATUS_VAL)
1647 mce->status |= MCI_STATUS_OVER;
1648 banks[2] = mce->addr;
1649 banks[3] = mce->misc;
1650 vcpu->arch.mcg_status = mce->mcg_status;
1651 banks[1] = mce->status;
1652 kvm_queue_exception(vcpu, MC_VECTOR);
1653 } else if (!(banks[1] & MCI_STATUS_VAL)
1654 || !(banks[1] & MCI_STATUS_UC)) {
1655 if (banks[1] & MCI_STATUS_VAL)
1656 mce->status |= MCI_STATUS_OVER;
1657 banks[2] = mce->addr;
1658 banks[3] = mce->misc;
1659 banks[1] = mce->status;
1660 } else
1661 banks[1] |= MCI_STATUS_OVER;
1662 return 0;
1663}
1664
313a3dc7
CO
1665long kvm_arch_vcpu_ioctl(struct file *filp,
1666 unsigned int ioctl, unsigned long arg)
1667{
1668 struct kvm_vcpu *vcpu = filp->private_data;
1669 void __user *argp = (void __user *)arg;
1670 int r;
b772ff36 1671 struct kvm_lapic_state *lapic = NULL;
313a3dc7
CO
1672
1673 switch (ioctl) {
1674 case KVM_GET_LAPIC: {
b772ff36 1675 lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
313a3dc7 1676
b772ff36
DH
1677 r = -ENOMEM;
1678 if (!lapic)
1679 goto out;
1680 r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
313a3dc7
CO
1681 if (r)
1682 goto out;
1683 r = -EFAULT;
b772ff36 1684 if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
313a3dc7
CO
1685 goto out;
1686 r = 0;
1687 break;
1688 }
1689 case KVM_SET_LAPIC: {
b772ff36
DH
1690 lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
1691 r = -ENOMEM;
1692 if (!lapic)
1693 goto out;
313a3dc7 1694 r = -EFAULT;
b772ff36 1695 if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
313a3dc7 1696 goto out;
b772ff36 1697 r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
313a3dc7
CO
1698 if (r)
1699 goto out;
1700 r = 0;
1701 break;
1702 }
f77bc6a4
ZX
1703 case KVM_INTERRUPT: {
1704 struct kvm_interrupt irq;
1705
1706 r = -EFAULT;
1707 if (copy_from_user(&irq, argp, sizeof irq))
1708 goto out;
1709 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1710 if (r)
1711 goto out;
1712 r = 0;
1713 break;
1714 }
c4abb7c9
JK
1715 case KVM_NMI: {
1716 r = kvm_vcpu_ioctl_nmi(vcpu);
1717 if (r)
1718 goto out;
1719 r = 0;
1720 break;
1721 }
313a3dc7
CO
1722 case KVM_SET_CPUID: {
1723 struct kvm_cpuid __user *cpuid_arg = argp;
1724 struct kvm_cpuid cpuid;
1725
1726 r = -EFAULT;
1727 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1728 goto out;
1729 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
1730 if (r)
1731 goto out;
1732 break;
1733 }
07716717
DK
1734 case KVM_SET_CPUID2: {
1735 struct kvm_cpuid2 __user *cpuid_arg = argp;
1736 struct kvm_cpuid2 cpuid;
1737
1738 r = -EFAULT;
1739 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1740 goto out;
1741 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
19355475 1742 cpuid_arg->entries);
07716717
DK
1743 if (r)
1744 goto out;
1745 break;
1746 }
1747 case KVM_GET_CPUID2: {
1748 struct kvm_cpuid2 __user *cpuid_arg = argp;
1749 struct kvm_cpuid2 cpuid;
1750
1751 r = -EFAULT;
1752 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1753 goto out;
1754 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
19355475 1755 cpuid_arg->entries);
07716717
DK
1756 if (r)
1757 goto out;
1758 r = -EFAULT;
1759 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1760 goto out;
1761 r = 0;
1762 break;
1763 }
313a3dc7
CO
1764 case KVM_GET_MSRS:
1765 r = msr_io(vcpu, argp, kvm_get_msr, 1);
1766 break;
1767 case KVM_SET_MSRS:
1768 r = msr_io(vcpu, argp, do_set_msr, 0);
1769 break;
b209749f
AK
1770 case KVM_TPR_ACCESS_REPORTING: {
1771 struct kvm_tpr_access_ctl tac;
1772
1773 r = -EFAULT;
1774 if (copy_from_user(&tac, argp, sizeof tac))
1775 goto out;
1776 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
1777 if (r)
1778 goto out;
1779 r = -EFAULT;
1780 if (copy_to_user(argp, &tac, sizeof tac))
1781 goto out;
1782 r = 0;
1783 break;
1784 };
b93463aa
AK
1785 case KVM_SET_VAPIC_ADDR: {
1786 struct kvm_vapic_addr va;
1787
1788 r = -EINVAL;
1789 if (!irqchip_in_kernel(vcpu->kvm))
1790 goto out;
1791 r = -EFAULT;
1792 if (copy_from_user(&va, argp, sizeof va))
1793 goto out;
1794 r = 0;
1795 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
1796 break;
1797 }
890ca9ae
HY
1798 case KVM_X86_SETUP_MCE: {
1799 u64 mcg_cap;
1800
1801 r = -EFAULT;
1802 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
1803 goto out;
1804 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
1805 break;
1806 }
1807 case KVM_X86_SET_MCE: {
1808 struct kvm_x86_mce mce;
1809
1810 r = -EFAULT;
1811 if (copy_from_user(&mce, argp, sizeof mce))
1812 goto out;
1813 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
1814 break;
1815 }
313a3dc7
CO
1816 default:
1817 r = -EINVAL;
1818 }
1819out:
7a6ce84c 1820 kfree(lapic);
313a3dc7
CO
1821 return r;
1822}
1823
1fe779f8
CO
1824static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
1825{
1826 int ret;
1827
1828 if (addr > (unsigned int)(-3 * PAGE_SIZE))
1829 return -1;
1830 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
1831 return ret;
1832}
1833
1834static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1835 u32 kvm_nr_mmu_pages)
1836{
1837 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
1838 return -EINVAL;
1839
72dc67a6 1840 down_write(&kvm->slots_lock);
7c8a83b7 1841 spin_lock(&kvm->mmu_lock);
1fe779f8
CO
1842
1843 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
f05e70ac 1844 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
1fe779f8 1845
7c8a83b7 1846 spin_unlock(&kvm->mmu_lock);
72dc67a6 1847 up_write(&kvm->slots_lock);
1fe779f8
CO
1848 return 0;
1849}
1850
1851static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1852{
f05e70ac 1853 return kvm->arch.n_alloc_mmu_pages;
1fe779f8
CO
1854}
1855
e9f85cde
ZX
1856gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1857{
1858 int i;
1859 struct kvm_mem_alias *alias;
1860
d69fb81f
ZX
1861 for (i = 0; i < kvm->arch.naliases; ++i) {
1862 alias = &kvm->arch.aliases[i];
e9f85cde
ZX
1863 if (gfn >= alias->base_gfn
1864 && gfn < alias->base_gfn + alias->npages)
1865 return alias->target_gfn + gfn - alias->base_gfn;
1866 }
1867 return gfn;
1868}
1869
1fe779f8
CO
1870/*
1871 * Set a new alias region. Aliases map a portion of physical memory into
1872 * another portion. This is useful for memory windows, for example the PC
1873 * VGA region.
1874 */
1875static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1876 struct kvm_memory_alias *alias)
1877{
1878 int r, n;
1879 struct kvm_mem_alias *p;
1880
1881 r = -EINVAL;
1882 /* General sanity checks */
1883 if (alias->memory_size & (PAGE_SIZE - 1))
1884 goto out;
1885 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
1886 goto out;
1887 if (alias->slot >= KVM_ALIAS_SLOTS)
1888 goto out;
1889 if (alias->guest_phys_addr + alias->memory_size
1890 < alias->guest_phys_addr)
1891 goto out;
1892 if (alias->target_phys_addr + alias->memory_size
1893 < alias->target_phys_addr)
1894 goto out;
1895
72dc67a6 1896 down_write(&kvm->slots_lock);
a1708ce8 1897 spin_lock(&kvm->mmu_lock);
1fe779f8 1898
d69fb81f 1899 p = &kvm->arch.aliases[alias->slot];
1fe779f8
CO
1900 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
1901 p->npages = alias->memory_size >> PAGE_SHIFT;
1902 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
1903
1904 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
d69fb81f 1905 if (kvm->arch.aliases[n - 1].npages)
1fe779f8 1906 break;
d69fb81f 1907 kvm->arch.naliases = n;
1fe779f8 1908
a1708ce8 1909 spin_unlock(&kvm->mmu_lock);
1fe779f8
CO
1910 kvm_mmu_zap_all(kvm);
1911
72dc67a6 1912 up_write(&kvm->slots_lock);
1fe779f8
CO
1913
1914 return 0;
1915
1916out:
1917 return r;
1918}
1919
1920static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1921{
1922 int r;
1923
1924 r = 0;
1925 switch (chip->chip_id) {
1926 case KVM_IRQCHIP_PIC_MASTER:
1927 memcpy(&chip->chip.pic,
1928 &pic_irqchip(kvm)->pics[0],
1929 sizeof(struct kvm_pic_state));
1930 break;
1931 case KVM_IRQCHIP_PIC_SLAVE:
1932 memcpy(&chip->chip.pic,
1933 &pic_irqchip(kvm)->pics[1],
1934 sizeof(struct kvm_pic_state));
1935 break;
1936 case KVM_IRQCHIP_IOAPIC:
1937 memcpy(&chip->chip.ioapic,
1938 ioapic_irqchip(kvm),
1939 sizeof(struct kvm_ioapic_state));
1940 break;
1941 default:
1942 r = -EINVAL;
1943 break;
1944 }
1945 return r;
1946}
1947
1948static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1949{
1950 int r;
1951
1952 r = 0;
1953 switch (chip->chip_id) {
1954 case KVM_IRQCHIP_PIC_MASTER:
1955 memcpy(&pic_irqchip(kvm)->pics[0],
1956 &chip->chip.pic,
1957 sizeof(struct kvm_pic_state));
1958 break;
1959 case KVM_IRQCHIP_PIC_SLAVE:
1960 memcpy(&pic_irqchip(kvm)->pics[1],
1961 &chip->chip.pic,
1962 sizeof(struct kvm_pic_state));
1963 break;
1964 case KVM_IRQCHIP_IOAPIC:
1965 memcpy(ioapic_irqchip(kvm),
1966 &chip->chip.ioapic,
1967 sizeof(struct kvm_ioapic_state));
1968 break;
1969 default:
1970 r = -EINVAL;
1971 break;
1972 }
1973 kvm_pic_update_irq(pic_irqchip(kvm));
1974 return r;
1975}
1976
e0f63cb9
SY
1977static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1978{
1979 int r = 0;
1980
1981 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
1982 return r;
1983}
1984
1985static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1986{
1987 int r = 0;
1988
1989 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
1990 kvm_pit_load_count(kvm, 0, ps->channels[0].count);
1991 return r;
1992}
1993
52d939a0
MT
1994static int kvm_vm_ioctl_reinject(struct kvm *kvm,
1995 struct kvm_reinject_control *control)
1996{
1997 if (!kvm->arch.vpit)
1998 return -ENXIO;
1999 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
2000 return 0;
2001}
2002
5bb064dc
ZX
2003/*
2004 * Get (and clear) the dirty memory log for a memory slot.
2005 */
2006int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2007 struct kvm_dirty_log *log)
2008{
2009 int r;
2010 int n;
2011 struct kvm_memory_slot *memslot;
2012 int is_dirty = 0;
2013
72dc67a6 2014 down_write(&kvm->slots_lock);
5bb064dc
ZX
2015
2016 r = kvm_get_dirty_log(kvm, log, &is_dirty);
2017 if (r)
2018 goto out;
2019
2020 /* If nothing is dirty, don't bother messing with page tables. */
2021 if (is_dirty) {
7c8a83b7 2022 spin_lock(&kvm->mmu_lock);
5bb064dc 2023 kvm_mmu_slot_remove_write_access(kvm, log->slot);
7c8a83b7 2024 spin_unlock(&kvm->mmu_lock);
5bb064dc
ZX
2025 kvm_flush_remote_tlbs(kvm);
2026 memslot = &kvm->memslots[log->slot];
2027 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
2028 memset(memslot->dirty_bitmap, 0, n);
2029 }
2030 r = 0;
2031out:
72dc67a6 2032 up_write(&kvm->slots_lock);
5bb064dc
ZX
2033 return r;
2034}
2035
1fe779f8
CO
2036long kvm_arch_vm_ioctl(struct file *filp,
2037 unsigned int ioctl, unsigned long arg)
2038{
2039 struct kvm *kvm = filp->private_data;
2040 void __user *argp = (void __user *)arg;
2041 int r = -EINVAL;
f0d66275
DH
2042 /*
2043 * This union makes it completely explicit to gcc-3.x
2044 * that these two variables' stack usage should be
2045 * combined, not added together.
2046 */
2047 union {
2048 struct kvm_pit_state ps;
2049 struct kvm_memory_alias alias;
c5ff41ce 2050 struct kvm_pit_config pit_config;
f0d66275 2051 } u;
1fe779f8
CO
2052
2053 switch (ioctl) {
2054 case KVM_SET_TSS_ADDR:
2055 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2056 if (r < 0)
2057 goto out;
2058 break;
2059 case KVM_SET_MEMORY_REGION: {
2060 struct kvm_memory_region kvm_mem;
2061 struct kvm_userspace_memory_region kvm_userspace_mem;
2062
2063 r = -EFAULT;
2064 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2065 goto out;
2066 kvm_userspace_mem.slot = kvm_mem.slot;
2067 kvm_userspace_mem.flags = kvm_mem.flags;
2068 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
2069 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
2070 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
2071 if (r)
2072 goto out;
2073 break;
2074 }
2075 case KVM_SET_NR_MMU_PAGES:
2076 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2077 if (r)
2078 goto out;
2079 break;
2080 case KVM_GET_NR_MMU_PAGES:
2081 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
2082 break;
f0d66275 2083 case KVM_SET_MEMORY_ALIAS:
1fe779f8 2084 r = -EFAULT;
f0d66275 2085 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
1fe779f8 2086 goto out;
f0d66275 2087 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
1fe779f8
CO
2088 if (r)
2089 goto out;
2090 break;
1fe779f8
CO
2091 case KVM_CREATE_IRQCHIP:
2092 r = -ENOMEM;
d7deeeb0
ZX
2093 kvm->arch.vpic = kvm_create_pic(kvm);
2094 if (kvm->arch.vpic) {
1fe779f8
CO
2095 r = kvm_ioapic_init(kvm);
2096 if (r) {
d7deeeb0
ZX
2097 kfree(kvm->arch.vpic);
2098 kvm->arch.vpic = NULL;
1fe779f8
CO
2099 goto out;
2100 }
2101 } else
2102 goto out;
399ec807
AK
2103 r = kvm_setup_default_irq_routing(kvm);
2104 if (r) {
2105 kfree(kvm->arch.vpic);
2106 kfree(kvm->arch.vioapic);
2107 goto out;
2108 }
1fe779f8 2109 break;
7837699f 2110 case KVM_CREATE_PIT:
c5ff41ce
JK
2111 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
2112 goto create_pit;
2113 case KVM_CREATE_PIT2:
2114 r = -EFAULT;
2115 if (copy_from_user(&u.pit_config, argp,
2116 sizeof(struct kvm_pit_config)))
2117 goto out;
2118 create_pit:
269e05e4
AK
2119 mutex_lock(&kvm->lock);
2120 r = -EEXIST;
2121 if (kvm->arch.vpit)
2122 goto create_pit_unlock;
7837699f 2123 r = -ENOMEM;
c5ff41ce 2124 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
7837699f
SY
2125 if (kvm->arch.vpit)
2126 r = 0;
269e05e4
AK
2127 create_pit_unlock:
2128 mutex_unlock(&kvm->lock);
7837699f 2129 break;
4925663a 2130 case KVM_IRQ_LINE_STATUS:
1fe779f8
CO
2131 case KVM_IRQ_LINE: {
2132 struct kvm_irq_level irq_event;
2133
2134 r = -EFAULT;
2135 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2136 goto out;
2137 if (irqchip_in_kernel(kvm)) {
4925663a 2138 __s32 status;
1fe779f8 2139 mutex_lock(&kvm->lock);
4925663a
GN
2140 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2141 irq_event.irq, irq_event.level);
1fe779f8 2142 mutex_unlock(&kvm->lock);
4925663a
GN
2143 if (ioctl == KVM_IRQ_LINE_STATUS) {
2144 irq_event.status = status;
2145 if (copy_to_user(argp, &irq_event,
2146 sizeof irq_event))
2147 goto out;
2148 }
1fe779f8
CO
2149 r = 0;
2150 }
2151 break;
2152 }
2153 case KVM_GET_IRQCHIP: {
2154 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 2155 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 2156
f0d66275
DH
2157 r = -ENOMEM;
2158 if (!chip)
1fe779f8 2159 goto out;
f0d66275
DH
2160 r = -EFAULT;
2161 if (copy_from_user(chip, argp, sizeof *chip))
2162 goto get_irqchip_out;
1fe779f8
CO
2163 r = -ENXIO;
2164 if (!irqchip_in_kernel(kvm))
f0d66275
DH
2165 goto get_irqchip_out;
2166 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1fe779f8 2167 if (r)
f0d66275 2168 goto get_irqchip_out;
1fe779f8 2169 r = -EFAULT;
f0d66275
DH
2170 if (copy_to_user(argp, chip, sizeof *chip))
2171 goto get_irqchip_out;
1fe779f8 2172 r = 0;
f0d66275
DH
2173 get_irqchip_out:
2174 kfree(chip);
2175 if (r)
2176 goto out;
1fe779f8
CO
2177 break;
2178 }
2179 case KVM_SET_IRQCHIP: {
2180 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 2181 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 2182
f0d66275
DH
2183 r = -ENOMEM;
2184 if (!chip)
1fe779f8 2185 goto out;
f0d66275
DH
2186 r = -EFAULT;
2187 if (copy_from_user(chip, argp, sizeof *chip))
2188 goto set_irqchip_out;
1fe779f8
CO
2189 r = -ENXIO;
2190 if (!irqchip_in_kernel(kvm))
f0d66275
DH
2191 goto set_irqchip_out;
2192 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
1fe779f8 2193 if (r)
f0d66275 2194 goto set_irqchip_out;
1fe779f8 2195 r = 0;
f0d66275
DH
2196 set_irqchip_out:
2197 kfree(chip);
2198 if (r)
2199 goto out;
1fe779f8
CO
2200 break;
2201 }
e0f63cb9 2202 case KVM_GET_PIT: {
e0f63cb9 2203 r = -EFAULT;
f0d66275 2204 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
2205 goto out;
2206 r = -ENXIO;
2207 if (!kvm->arch.vpit)
2208 goto out;
f0d66275 2209 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
e0f63cb9
SY
2210 if (r)
2211 goto out;
2212 r = -EFAULT;
f0d66275 2213 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
2214 goto out;
2215 r = 0;
2216 break;
2217 }
2218 case KVM_SET_PIT: {
e0f63cb9 2219 r = -EFAULT;
f0d66275 2220 if (copy_from_user(&u.ps, argp, sizeof u.ps))
e0f63cb9
SY
2221 goto out;
2222 r = -ENXIO;
2223 if (!kvm->arch.vpit)
2224 goto out;
f0d66275 2225 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
e0f63cb9
SY
2226 if (r)
2227 goto out;
2228 r = 0;
2229 break;
2230 }
52d939a0
MT
2231 case KVM_REINJECT_CONTROL: {
2232 struct kvm_reinject_control control;
2233 r = -EFAULT;
2234 if (copy_from_user(&control, argp, sizeof(control)))
2235 goto out;
2236 r = kvm_vm_ioctl_reinject(kvm, &control);
2237 if (r)
2238 goto out;
2239 r = 0;
2240 break;
2241 }
1fe779f8
CO
2242 default:
2243 ;
2244 }
2245out:
2246 return r;
2247}
2248
a16b043c 2249static void kvm_init_msr_list(void)
043405e1
CO
2250{
2251 u32 dummy[2];
2252 unsigned i, j;
2253
2254 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2255 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2256 continue;
2257 if (j < i)
2258 msrs_to_save[j] = msrs_to_save[i];
2259 j++;
2260 }
2261 num_msrs_to_save = j;
2262}
2263
bbd9b64e
CO
2264/*
2265 * Only apic need an MMIO device hook, so shortcut now..
2266 */
2267static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
92760499
LV
2268 gpa_t addr, int len,
2269 int is_write)
bbd9b64e
CO
2270{
2271 struct kvm_io_device *dev;
2272
ad312c7c
ZX
2273 if (vcpu->arch.apic) {
2274 dev = &vcpu->arch.apic->dev;
d76685c4 2275 if (kvm_iodevice_in_range(dev, addr, len, is_write))
bbd9b64e
CO
2276 return dev;
2277 }
2278 return NULL;
2279}
2280
2281
2282static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
92760499
LV
2283 gpa_t addr, int len,
2284 int is_write)
bbd9b64e
CO
2285{
2286 struct kvm_io_device *dev;
2287
92760499 2288 dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
bbd9b64e 2289 if (dev == NULL)
92760499
LV
2290 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
2291 is_write);
bbd9b64e
CO
2292 return dev;
2293}
2294
cded19f3
HE
2295static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2296 struct kvm_vcpu *vcpu)
bbd9b64e
CO
2297{
2298 void *data = val;
10589a46 2299 int r = X86EMUL_CONTINUE;
bbd9b64e
CO
2300
2301 while (bytes) {
ad312c7c 2302 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e 2303 unsigned offset = addr & (PAGE_SIZE-1);
77c2002e 2304 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
bbd9b64e
CO
2305 int ret;
2306
10589a46
MT
2307 if (gpa == UNMAPPED_GVA) {
2308 r = X86EMUL_PROPAGATE_FAULT;
2309 goto out;
2310 }
77c2002e 2311 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
10589a46
MT
2312 if (ret < 0) {
2313 r = X86EMUL_UNHANDLEABLE;
2314 goto out;
2315 }
bbd9b64e 2316
77c2002e
IE
2317 bytes -= toread;
2318 data += toread;
2319 addr += toread;
bbd9b64e 2320 }
10589a46 2321out:
10589a46 2322 return r;
bbd9b64e 2323}
77c2002e 2324
cded19f3
HE
2325static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2326 struct kvm_vcpu *vcpu)
77c2002e
IE
2327{
2328 void *data = val;
2329 int r = X86EMUL_CONTINUE;
2330
2331 while (bytes) {
2332 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2333 unsigned offset = addr & (PAGE_SIZE-1);
2334 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2335 int ret;
2336
2337 if (gpa == UNMAPPED_GVA) {
2338 r = X86EMUL_PROPAGATE_FAULT;
2339 goto out;
2340 }
2341 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2342 if (ret < 0) {
2343 r = X86EMUL_UNHANDLEABLE;
2344 goto out;
2345 }
2346
2347 bytes -= towrite;
2348 data += towrite;
2349 addr += towrite;
2350 }
2351out:
2352 return r;
2353}
2354
bbd9b64e 2355
bbd9b64e
CO
2356static int emulator_read_emulated(unsigned long addr,
2357 void *val,
2358 unsigned int bytes,
2359 struct kvm_vcpu *vcpu)
2360{
2361 struct kvm_io_device *mmio_dev;
2362 gpa_t gpa;
2363
2364 if (vcpu->mmio_read_completed) {
2365 memcpy(val, vcpu->mmio_data, bytes);
2366 vcpu->mmio_read_completed = 0;
2367 return X86EMUL_CONTINUE;
2368 }
2369
ad312c7c 2370 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e
CO
2371
2372 /* For APIC access vmexit */
2373 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2374 goto mmio;
2375
77c2002e
IE
2376 if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2377 == X86EMUL_CONTINUE)
bbd9b64e
CO
2378 return X86EMUL_CONTINUE;
2379 if (gpa == UNMAPPED_GVA)
2380 return X86EMUL_PROPAGATE_FAULT;
2381
2382mmio:
2383 /*
2384 * Is this MMIO handled locally?
2385 */
10589a46 2386 mutex_lock(&vcpu->kvm->lock);
92760499 2387 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
bbd9b64e
CO
2388 if (mmio_dev) {
2389 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
10589a46 2390 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2391 return X86EMUL_CONTINUE;
2392 }
10589a46 2393 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2394
2395 vcpu->mmio_needed = 1;
2396 vcpu->mmio_phys_addr = gpa;
2397 vcpu->mmio_size = bytes;
2398 vcpu->mmio_is_write = 0;
2399
2400 return X86EMUL_UNHANDLEABLE;
2401}
2402
3200f405 2403int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
9f811285 2404 const void *val, int bytes)
bbd9b64e
CO
2405{
2406 int ret;
2407
2408 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
9f811285 2409 if (ret < 0)
bbd9b64e 2410 return 0;
ad218f85 2411 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
bbd9b64e
CO
2412 return 1;
2413}
2414
2415static int emulator_write_emulated_onepage(unsigned long addr,
2416 const void *val,
2417 unsigned int bytes,
2418 struct kvm_vcpu *vcpu)
2419{
2420 struct kvm_io_device *mmio_dev;
10589a46
MT
2421 gpa_t gpa;
2422
10589a46 2423 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e
CO
2424
2425 if (gpa == UNMAPPED_GVA) {
c3c91fee 2426 kvm_inject_page_fault(vcpu, addr, 2);
bbd9b64e
CO
2427 return X86EMUL_PROPAGATE_FAULT;
2428 }
2429
2430 /* For APIC access vmexit */
2431 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2432 goto mmio;
2433
2434 if (emulator_write_phys(vcpu, gpa, val, bytes))
2435 return X86EMUL_CONTINUE;
2436
2437mmio:
2438 /*
2439 * Is this MMIO handled locally?
2440 */
10589a46 2441 mutex_lock(&vcpu->kvm->lock);
92760499 2442 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
bbd9b64e
CO
2443 if (mmio_dev) {
2444 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
10589a46 2445 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2446 return X86EMUL_CONTINUE;
2447 }
10589a46 2448 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2449
2450 vcpu->mmio_needed = 1;
2451 vcpu->mmio_phys_addr = gpa;
2452 vcpu->mmio_size = bytes;
2453 vcpu->mmio_is_write = 1;
2454 memcpy(vcpu->mmio_data, val, bytes);
2455
2456 return X86EMUL_CONTINUE;
2457}
2458
2459int emulator_write_emulated(unsigned long addr,
2460 const void *val,
2461 unsigned int bytes,
2462 struct kvm_vcpu *vcpu)
2463{
2464 /* Crossing a page boundary? */
2465 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2466 int rc, now;
2467
2468 now = -addr & ~PAGE_MASK;
2469 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2470 if (rc != X86EMUL_CONTINUE)
2471 return rc;
2472 addr += now;
2473 val += now;
2474 bytes -= now;
2475 }
2476 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2477}
2478EXPORT_SYMBOL_GPL(emulator_write_emulated);
2479
2480static int emulator_cmpxchg_emulated(unsigned long addr,
2481 const void *old,
2482 const void *new,
2483 unsigned int bytes,
2484 struct kvm_vcpu *vcpu)
2485{
2486 static int reported;
2487
2488 if (!reported) {
2489 reported = 1;
2490 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2491 }
2bacc55c
MT
2492#ifndef CONFIG_X86_64
2493 /* guests cmpxchg8b have to be emulated atomically */
2494 if (bytes == 8) {
10589a46 2495 gpa_t gpa;
2bacc55c 2496 struct page *page;
c0b49b0d 2497 char *kaddr;
2bacc55c
MT
2498 u64 val;
2499
10589a46
MT
2500 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2501
2bacc55c
MT
2502 if (gpa == UNMAPPED_GVA ||
2503 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2504 goto emul_write;
2505
2506 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2507 goto emul_write;
2508
2509 val = *(u64 *)new;
72dc67a6 2510
2bacc55c 2511 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
72dc67a6 2512
c0b49b0d
AM
2513 kaddr = kmap_atomic(page, KM_USER0);
2514 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2515 kunmap_atomic(kaddr, KM_USER0);
2bacc55c
MT
2516 kvm_release_page_dirty(page);
2517 }
3200f405 2518emul_write:
2bacc55c
MT
2519#endif
2520
bbd9b64e
CO
2521 return emulator_write_emulated(addr, new, bytes, vcpu);
2522}
2523
2524static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2525{
2526 return kvm_x86_ops->get_segment_base(vcpu, seg);
2527}
2528
2529int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2530{
a7052897 2531 kvm_mmu_invlpg(vcpu, address);
bbd9b64e
CO
2532 return X86EMUL_CONTINUE;
2533}
2534
2535int emulate_clts(struct kvm_vcpu *vcpu)
2536{
54e445ca 2537 KVMTRACE_0D(CLTS, vcpu, handler);
ad312c7c 2538 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
bbd9b64e
CO
2539 return X86EMUL_CONTINUE;
2540}
2541
2542int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2543{
2544 struct kvm_vcpu *vcpu = ctxt->vcpu;
2545
2546 switch (dr) {
2547 case 0 ... 3:
2548 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2549 return X86EMUL_CONTINUE;
2550 default:
b8688d51 2551 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
bbd9b64e
CO
2552 return X86EMUL_UNHANDLEABLE;
2553 }
2554}
2555
2556int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2557{
2558 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2559 int exception;
2560
2561 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2562 if (exception) {
2563 /* FIXME: better handling */
2564 return X86EMUL_UNHANDLEABLE;
2565 }
2566 return X86EMUL_CONTINUE;
2567}
2568
2569void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2570{
bbd9b64e 2571 u8 opcodes[4];
5fdbf976 2572 unsigned long rip = kvm_rip_read(vcpu);
bbd9b64e
CO
2573 unsigned long rip_linear;
2574
f76c710d 2575 if (!printk_ratelimit())
bbd9b64e
CO
2576 return;
2577
25be4608
GC
2578 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2579
77c2002e 2580 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
bbd9b64e
CO
2581
2582 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2583 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
bbd9b64e
CO
2584}
2585EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2586
14af3f3c 2587static struct x86_emulate_ops emulate_ops = {
77c2002e 2588 .read_std = kvm_read_guest_virt,
bbd9b64e
CO
2589 .read_emulated = emulator_read_emulated,
2590 .write_emulated = emulator_write_emulated,
2591 .cmpxchg_emulated = emulator_cmpxchg_emulated,
2592};
2593
5fdbf976
MT
2594static void cache_all_regs(struct kvm_vcpu *vcpu)
2595{
2596 kvm_register_read(vcpu, VCPU_REGS_RAX);
2597 kvm_register_read(vcpu, VCPU_REGS_RSP);
2598 kvm_register_read(vcpu, VCPU_REGS_RIP);
2599 vcpu->arch.regs_dirty = ~0;
2600}
2601
bbd9b64e
CO
2602int emulate_instruction(struct kvm_vcpu *vcpu,
2603 struct kvm_run *run,
2604 unsigned long cr2,
2605 u16 error_code,
571008da 2606 int emulation_type)
bbd9b64e 2607{
310b5d30 2608 int r, shadow_mask;
571008da 2609 struct decode_cache *c;
bbd9b64e 2610
26eef70c 2611 kvm_clear_exception_queue(vcpu);
ad312c7c 2612 vcpu->arch.mmio_fault_cr2 = cr2;
5fdbf976
MT
2613 /*
2614 * TODO: fix x86_emulate.c to use guest_read/write_register
2615 * instead of direct ->regs accesses, can save hundred cycles
2616 * on Intel for instructions that don't read/change RSP, for
2617 * for example.
2618 */
2619 cache_all_regs(vcpu);
bbd9b64e
CO
2620
2621 vcpu->mmio_is_write = 0;
ad312c7c 2622 vcpu->arch.pio.string = 0;
bbd9b64e 2623
571008da 2624 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
bbd9b64e
CO
2625 int cs_db, cs_l;
2626 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2627
ad312c7c
ZX
2628 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2629 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2630 vcpu->arch.emulate_ctxt.mode =
2631 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
bbd9b64e
CO
2632 ? X86EMUL_MODE_REAL : cs_l
2633 ? X86EMUL_MODE_PROT64 : cs_db
2634 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2635
ad312c7c 2636 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
571008da
SY
2637
2638 /* Reject the instructions other than VMCALL/VMMCALL when
2639 * try to emulate invalid opcode */
2640 c = &vcpu->arch.emulate_ctxt.decode;
2641 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2642 (!(c->twobyte && c->b == 0x01 &&
2643 (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2644 c->modrm_mod == 3 && c->modrm_rm == 1)))
2645 return EMULATE_FAIL;
2646
f2b5756b 2647 ++vcpu->stat.insn_emulation;
bbd9b64e 2648 if (r) {
f2b5756b 2649 ++vcpu->stat.insn_emulation_fail;
bbd9b64e
CO
2650 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2651 return EMULATE_DONE;
2652 return EMULATE_FAIL;
2653 }
2654 }
2655
ba8afb6b
GN
2656 if (emulation_type & EMULTYPE_SKIP) {
2657 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
2658 return EMULATE_DONE;
2659 }
2660
ad312c7c 2661 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
310b5d30
GC
2662 shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
2663
2664 if (r == 0)
2665 kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
bbd9b64e 2666
ad312c7c 2667 if (vcpu->arch.pio.string)
bbd9b64e
CO
2668 return EMULATE_DO_MMIO;
2669
2670 if ((r || vcpu->mmio_is_write) && run) {
2671 run->exit_reason = KVM_EXIT_MMIO;
2672 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2673 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2674 run->mmio.len = vcpu->mmio_size;
2675 run->mmio.is_write = vcpu->mmio_is_write;
2676 }
2677
2678 if (r) {
2679 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2680 return EMULATE_DONE;
2681 if (!vcpu->mmio_needed) {
2682 kvm_report_emulation_failure(vcpu, "mmio");
2683 return EMULATE_FAIL;
2684 }
2685 return EMULATE_DO_MMIO;
2686 }
2687
ad312c7c 2688 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
bbd9b64e
CO
2689
2690 if (vcpu->mmio_is_write) {
2691 vcpu->mmio_needed = 0;
2692 return EMULATE_DO_MMIO;
2693 }
2694
2695 return EMULATE_DONE;
2696}
2697EXPORT_SYMBOL_GPL(emulate_instruction);
2698
de7d789a
CO
2699static int pio_copy_data(struct kvm_vcpu *vcpu)
2700{
ad312c7c 2701 void *p = vcpu->arch.pio_data;
0f346074 2702 gva_t q = vcpu->arch.pio.guest_gva;
de7d789a 2703 unsigned bytes;
0f346074 2704 int ret;
de7d789a 2705
ad312c7c
ZX
2706 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2707 if (vcpu->arch.pio.in)
0f346074 2708 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
de7d789a 2709 else
0f346074
IE
2710 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2711 return ret;
de7d789a
CO
2712}
2713
2714int complete_pio(struct kvm_vcpu *vcpu)
2715{
ad312c7c 2716 struct kvm_pio_request *io = &vcpu->arch.pio;
de7d789a
CO
2717 long delta;
2718 int r;
5fdbf976 2719 unsigned long val;
de7d789a
CO
2720
2721 if (!io->string) {
5fdbf976
MT
2722 if (io->in) {
2723 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2724 memcpy(&val, vcpu->arch.pio_data, io->size);
2725 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2726 }
de7d789a
CO
2727 } else {
2728 if (io->in) {
2729 r = pio_copy_data(vcpu);
5fdbf976 2730 if (r)
de7d789a 2731 return r;
de7d789a
CO
2732 }
2733
2734 delta = 1;
2735 if (io->rep) {
2736 delta *= io->cur_count;
2737 /*
2738 * The size of the register should really depend on
2739 * current address size.
2740 */
5fdbf976
MT
2741 val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2742 val -= delta;
2743 kvm_register_write(vcpu, VCPU_REGS_RCX, val);
de7d789a
CO
2744 }
2745 if (io->down)
2746 delta = -delta;
2747 delta *= io->size;
5fdbf976
MT
2748 if (io->in) {
2749 val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2750 val += delta;
2751 kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2752 } else {
2753 val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2754 val += delta;
2755 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2756 }
de7d789a
CO
2757 }
2758
de7d789a
CO
2759 io->count -= io->cur_count;
2760 io->cur_count = 0;
2761
2762 return 0;
2763}
2764
2765static void kernel_pio(struct kvm_io_device *pio_dev,
2766 struct kvm_vcpu *vcpu,
2767 void *pd)
2768{
2769 /* TODO: String I/O for in kernel device */
2770
2771 mutex_lock(&vcpu->kvm->lock);
ad312c7c
ZX
2772 if (vcpu->arch.pio.in)
2773 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2774 vcpu->arch.pio.size,
de7d789a
CO
2775 pd);
2776 else
ad312c7c
ZX
2777 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2778 vcpu->arch.pio.size,
de7d789a
CO
2779 pd);
2780 mutex_unlock(&vcpu->kvm->lock);
2781}
2782
2783static void pio_string_write(struct kvm_io_device *pio_dev,
2784 struct kvm_vcpu *vcpu)
2785{
ad312c7c
ZX
2786 struct kvm_pio_request *io = &vcpu->arch.pio;
2787 void *pd = vcpu->arch.pio_data;
de7d789a
CO
2788 int i;
2789
2790 mutex_lock(&vcpu->kvm->lock);
2791 for (i = 0; i < io->cur_count; i++) {
2792 kvm_iodevice_write(pio_dev, io->port,
2793 io->size,
2794 pd);
2795 pd += io->size;
2796 }
2797 mutex_unlock(&vcpu->kvm->lock);
2798}
2799
2800static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
92760499
LV
2801 gpa_t addr, int len,
2802 int is_write)
de7d789a 2803{
92760499 2804 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
de7d789a
CO
2805}
2806
2807int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2808 int size, unsigned port)
2809{
2810 struct kvm_io_device *pio_dev;
5fdbf976 2811 unsigned long val;
de7d789a
CO
2812
2813 vcpu->run->exit_reason = KVM_EXIT_IO;
2814 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2815 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2816 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2817 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2818 vcpu->run->io.port = vcpu->arch.pio.port = port;
2819 vcpu->arch.pio.in = in;
2820 vcpu->arch.pio.string = 0;
2821 vcpu->arch.pio.down = 0;
ad312c7c 2822 vcpu->arch.pio.rep = 0;
de7d789a 2823
2714d1d3
FEL
2824 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2825 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2826 handler);
2827 else
2828 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2829 handler);
2830
5fdbf976
MT
2831 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2832 memcpy(vcpu->arch.pio_data, &val, 4);
de7d789a 2833
92760499 2834 pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
de7d789a 2835 if (pio_dev) {
ad312c7c 2836 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
de7d789a
CO
2837 complete_pio(vcpu);
2838 return 1;
2839 }
2840 return 0;
2841}
2842EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2843
2844int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2845 int size, unsigned long count, int down,
2846 gva_t address, int rep, unsigned port)
2847{
2848 unsigned now, in_page;
0f346074 2849 int ret = 0;
de7d789a
CO
2850 struct kvm_io_device *pio_dev;
2851
2852 vcpu->run->exit_reason = KVM_EXIT_IO;
2853 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2854 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2855 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2856 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2857 vcpu->run->io.port = vcpu->arch.pio.port = port;
2858 vcpu->arch.pio.in = in;
2859 vcpu->arch.pio.string = 1;
2860 vcpu->arch.pio.down = down;
ad312c7c 2861 vcpu->arch.pio.rep = rep;
de7d789a 2862
2714d1d3
FEL
2863 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2864 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2865 handler);
2866 else
2867 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2868 handler);
2869
de7d789a
CO
2870 if (!count) {
2871 kvm_x86_ops->skip_emulated_instruction(vcpu);
2872 return 1;
2873 }
2874
2875 if (!down)
2876 in_page = PAGE_SIZE - offset_in_page(address);
2877 else
2878 in_page = offset_in_page(address) + size;
2879 now = min(count, (unsigned long)in_page / size);
0f346074 2880 if (!now)
de7d789a 2881 now = 1;
de7d789a
CO
2882 if (down) {
2883 /*
2884 * String I/O in reverse. Yuck. Kill the guest, fix later.
2885 */
2886 pr_unimpl(vcpu, "guest string pio down\n");
c1a5d4f9 2887 kvm_inject_gp(vcpu, 0);
de7d789a
CO
2888 return 1;
2889 }
2890 vcpu->run->io.count = now;
ad312c7c 2891 vcpu->arch.pio.cur_count = now;
de7d789a 2892
ad312c7c 2893 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
de7d789a
CO
2894 kvm_x86_ops->skip_emulated_instruction(vcpu);
2895
0f346074 2896 vcpu->arch.pio.guest_gva = address;
de7d789a 2897
92760499
LV
2898 pio_dev = vcpu_find_pio_dev(vcpu, port,
2899 vcpu->arch.pio.cur_count,
2900 !vcpu->arch.pio.in);
ad312c7c 2901 if (!vcpu->arch.pio.in) {
de7d789a
CO
2902 /* string PIO write */
2903 ret = pio_copy_data(vcpu);
0f346074
IE
2904 if (ret == X86EMUL_PROPAGATE_FAULT) {
2905 kvm_inject_gp(vcpu, 0);
2906 return 1;
2907 }
2908 if (ret == 0 && pio_dev) {
de7d789a
CO
2909 pio_string_write(pio_dev, vcpu);
2910 complete_pio(vcpu);
ad312c7c 2911 if (vcpu->arch.pio.count == 0)
de7d789a
CO
2912 ret = 1;
2913 }
2914 } else if (pio_dev)
2915 pr_unimpl(vcpu, "no string pio read support yet, "
2916 "port %x size %d count %ld\n",
2917 port, size, count);
2918
2919 return ret;
2920}
2921EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2922
c8076604
GH
2923static void bounce_off(void *info)
2924{
2925 /* nothing */
2926}
2927
2928static unsigned int ref_freq;
2929static unsigned long tsc_khz_ref;
2930
2931static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
2932 void *data)
2933{
2934 struct cpufreq_freqs *freq = data;
2935 struct kvm *kvm;
2936 struct kvm_vcpu *vcpu;
2937 int i, send_ipi = 0;
2938
2939 if (!ref_freq)
2940 ref_freq = freq->old;
2941
2942 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
2943 return 0;
2944 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
2945 return 0;
2946 per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
2947
2948 spin_lock(&kvm_lock);
2949 list_for_each_entry(kvm, &vm_list, vm_list) {
2950 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2951 vcpu = kvm->vcpus[i];
2952 if (!vcpu)
2953 continue;
2954 if (vcpu->cpu != freq->cpu)
2955 continue;
2956 if (!kvm_request_guest_time_update(vcpu))
2957 continue;
2958 if (vcpu->cpu != smp_processor_id())
2959 send_ipi++;
2960 }
2961 }
2962 spin_unlock(&kvm_lock);
2963
2964 if (freq->old < freq->new && send_ipi) {
2965 /*
2966 * We upscale the frequency. Must make the guest
2967 * doesn't see old kvmclock values while running with
2968 * the new frequency, otherwise we risk the guest sees
2969 * time go backwards.
2970 *
2971 * In case we update the frequency for another cpu
2972 * (which might be in guest context) send an interrupt
2973 * to kick the cpu out of guest context. Next time
2974 * guest context is entered kvmclock will be updated,
2975 * so the guest will not see stale values.
2976 */
2977 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
2978 }
2979 return 0;
2980}
2981
2982static struct notifier_block kvmclock_cpufreq_notifier_block = {
2983 .notifier_call = kvmclock_cpufreq_notifier
2984};
2985
f8c16bba 2986int kvm_arch_init(void *opaque)
043405e1 2987{
c8076604 2988 int r, cpu;
f8c16bba
ZX
2989 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2990
f8c16bba
ZX
2991 if (kvm_x86_ops) {
2992 printk(KERN_ERR "kvm: already loaded the other module\n");
56c6d28a
ZX
2993 r = -EEXIST;
2994 goto out;
f8c16bba
ZX
2995 }
2996
2997 if (!ops->cpu_has_kvm_support()) {
2998 printk(KERN_ERR "kvm: no hardware support\n");
56c6d28a
ZX
2999 r = -EOPNOTSUPP;
3000 goto out;
f8c16bba
ZX
3001 }
3002 if (ops->disabled_by_bios()) {
3003 printk(KERN_ERR "kvm: disabled by bios\n");
56c6d28a
ZX
3004 r = -EOPNOTSUPP;
3005 goto out;
f8c16bba
ZX
3006 }
3007
97db56ce
AK
3008 r = kvm_mmu_module_init();
3009 if (r)
3010 goto out;
3011
3012 kvm_init_msr_list();
3013
f8c16bba 3014 kvm_x86_ops = ops;
56c6d28a 3015 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
7b52345e
SY
3016 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
3017 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4b12f0de 3018 PT_DIRTY_MASK, PT64_NX_MASK, 0);
c8076604
GH
3019
3020 for_each_possible_cpu(cpu)
3021 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
3022 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
3023 tsc_khz_ref = tsc_khz;
3024 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
3025 CPUFREQ_TRANSITION_NOTIFIER);
3026 }
3027
f8c16bba 3028 return 0;
56c6d28a
ZX
3029
3030out:
56c6d28a 3031 return r;
043405e1 3032}
8776e519 3033
f8c16bba
ZX
3034void kvm_arch_exit(void)
3035{
888d256e
JK
3036 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
3037 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
3038 CPUFREQ_TRANSITION_NOTIFIER);
f8c16bba 3039 kvm_x86_ops = NULL;
56c6d28a
ZX
3040 kvm_mmu_module_exit();
3041}
f8c16bba 3042
8776e519
HB
3043int kvm_emulate_halt(struct kvm_vcpu *vcpu)
3044{
3045 ++vcpu->stat.halt_exits;
2714d1d3 3046 KVMTRACE_0D(HLT, vcpu, handler);
8776e519 3047 if (irqchip_in_kernel(vcpu->kvm)) {
a4535290 3048 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
8776e519
HB
3049 return 1;
3050 } else {
3051 vcpu->run->exit_reason = KVM_EXIT_HLT;
3052 return 0;
3053 }
3054}
3055EXPORT_SYMBOL_GPL(kvm_emulate_halt);
3056
2f333bcb
MT
3057static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
3058 unsigned long a1)
3059{
3060 if (is_long_mode(vcpu))
3061 return a0;
3062 else
3063 return a0 | ((gpa_t)a1 << 32);
3064}
3065
8776e519
HB
3066int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
3067{
3068 unsigned long nr, a0, a1, a2, a3, ret;
2f333bcb 3069 int r = 1;
8776e519 3070
5fdbf976
MT
3071 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
3072 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
3073 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
3074 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
3075 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
8776e519 3076
2714d1d3
FEL
3077 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
3078
8776e519
HB
3079 if (!is_long_mode(vcpu)) {
3080 nr &= 0xFFFFFFFF;
3081 a0 &= 0xFFFFFFFF;
3082 a1 &= 0xFFFFFFFF;
3083 a2 &= 0xFFFFFFFF;
3084 a3 &= 0xFFFFFFFF;
3085 }
3086
3087 switch (nr) {
b93463aa
AK
3088 case KVM_HC_VAPIC_POLL_IRQ:
3089 ret = 0;
3090 break;
2f333bcb
MT
3091 case KVM_HC_MMU_OP:
3092 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
3093 break;
8776e519
HB
3094 default:
3095 ret = -KVM_ENOSYS;
3096 break;
3097 }
5fdbf976 3098 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
f11c3a8d 3099 ++vcpu->stat.hypercalls;
2f333bcb 3100 return r;
8776e519
HB
3101}
3102EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
3103
3104int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3105{
3106 char instruction[3];
3107 int ret = 0;
5fdbf976 3108 unsigned long rip = kvm_rip_read(vcpu);
8776e519 3109
8776e519
HB
3110
3111 /*
3112 * Blow out the MMU to ensure that no other VCPU has an active mapping
3113 * to ensure that the updated hypercall appears atomically across all
3114 * VCPUs.
3115 */
3116 kvm_mmu_zap_all(vcpu->kvm);
3117
8776e519 3118 kvm_x86_ops->patch_hypercall(vcpu, instruction);
5fdbf976 3119 if (emulator_write_emulated(rip, instruction, 3, vcpu)
8776e519
HB
3120 != X86EMUL_CONTINUE)
3121 ret = -EFAULT;
3122
8776e519
HB
3123 return ret;
3124}
3125
3126static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3127{
3128 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3129}
3130
3131void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3132{
3133 struct descriptor_table dt = { limit, base };
3134
3135 kvm_x86_ops->set_gdt(vcpu, &dt);
3136}
3137
3138void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3139{
3140 struct descriptor_table dt = { limit, base };
3141
3142 kvm_x86_ops->set_idt(vcpu, &dt);
3143}
3144
3145void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
3146 unsigned long *rflags)
3147{
2d3ad1f4 3148 kvm_lmsw(vcpu, msw);
8776e519
HB
3149 *rflags = kvm_x86_ops->get_rflags(vcpu);
3150}
3151
3152unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3153{
54e445ca
JR
3154 unsigned long value;
3155
8776e519
HB
3156 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3157 switch (cr) {
3158 case 0:
54e445ca
JR
3159 value = vcpu->arch.cr0;
3160 break;
8776e519 3161 case 2:
54e445ca
JR
3162 value = vcpu->arch.cr2;
3163 break;
8776e519 3164 case 3:
54e445ca
JR
3165 value = vcpu->arch.cr3;
3166 break;
8776e519 3167 case 4:
54e445ca
JR
3168 value = vcpu->arch.cr4;
3169 break;
152ff9be 3170 case 8:
54e445ca
JR
3171 value = kvm_get_cr8(vcpu);
3172 break;
8776e519 3173 default:
b8688d51 3174 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
3175 return 0;
3176 }
54e445ca
JR
3177 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
3178 (u32)((u64)value >> 32), handler);
3179
3180 return value;
8776e519
HB
3181}
3182
3183void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3184 unsigned long *rflags)
3185{
54e445ca
JR
3186 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
3187 (u32)((u64)val >> 32), handler);
3188
8776e519
HB
3189 switch (cr) {
3190 case 0:
2d3ad1f4 3191 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
8776e519
HB
3192 *rflags = kvm_x86_ops->get_rflags(vcpu);
3193 break;
3194 case 2:
ad312c7c 3195 vcpu->arch.cr2 = val;
8776e519
HB
3196 break;
3197 case 3:
2d3ad1f4 3198 kvm_set_cr3(vcpu, val);
8776e519
HB
3199 break;
3200 case 4:
2d3ad1f4 3201 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
8776e519 3202 break;
152ff9be 3203 case 8:
2d3ad1f4 3204 kvm_set_cr8(vcpu, val & 0xfUL);
152ff9be 3205 break;
8776e519 3206 default:
b8688d51 3207 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
3208 }
3209}
3210
07716717
DK
3211static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
3212{
ad312c7c
ZX
3213 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
3214 int j, nent = vcpu->arch.cpuid_nent;
07716717
DK
3215
3216 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
3217 /* when no next entry is found, the current entry[i] is reselected */
0fdf8e59 3218 for (j = i + 1; ; j = (j + 1) % nent) {
ad312c7c 3219 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
07716717
DK
3220 if (ej->function == e->function) {
3221 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
3222 return j;
3223 }
3224 }
3225 return 0; /* silence gcc, even though control never reaches here */
3226}
3227
3228/* find an entry with matching function, matching index (if needed), and that
3229 * should be read next (if it's stateful) */
3230static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
3231 u32 function, u32 index)
3232{
3233 if (e->function != function)
3234 return 0;
3235 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
3236 return 0;
3237 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
19355475 3238 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
07716717
DK
3239 return 0;
3240 return 1;
3241}
3242
d8017474
AG
3243struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3244 u32 function, u32 index)
8776e519
HB
3245{
3246 int i;
d8017474 3247 struct kvm_cpuid_entry2 *best = NULL;
8776e519 3248
ad312c7c 3249 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
d8017474
AG
3250 struct kvm_cpuid_entry2 *e;
3251
ad312c7c 3252 e = &vcpu->arch.cpuid_entries[i];
07716717
DK
3253 if (is_matching_cpuid_entry(e, function, index)) {
3254 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3255 move_to_next_stateful_cpuid_entry(vcpu, i);
8776e519
HB
3256 best = e;
3257 break;
3258 }
3259 /*
3260 * Both basic or both extended?
3261 */
3262 if (((e->function ^ function) & 0x80000000) == 0)
3263 if (!best || e->function > best->function)
3264 best = e;
3265 }
d8017474
AG
3266 return best;
3267}
3268
82725b20
DE
3269int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3270{
3271 struct kvm_cpuid_entry2 *best;
3272
3273 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3274 if (best)
3275 return best->eax & 0xff;
3276 return 36;
3277}
3278
d8017474
AG
3279void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3280{
3281 u32 function, index;
3282 struct kvm_cpuid_entry2 *best;
3283
3284 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3285 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3286 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3287 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3288 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3289 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3290 best = kvm_find_cpuid_entry(vcpu, function, index);
8776e519 3291 if (best) {
5fdbf976
MT
3292 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3293 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3294 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3295 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
8776e519 3296 }
8776e519 3297 kvm_x86_ops->skip_emulated_instruction(vcpu);
2714d1d3 3298 KVMTRACE_5D(CPUID, vcpu, function,
5fdbf976
MT
3299 (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
3300 (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
3301 (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
3302 (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
8776e519
HB
3303}
3304EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
d0752060 3305
b6c7a5dc
HB
3306/*
3307 * Check if userspace requested an interrupt window, and that the
3308 * interrupt window is open.
3309 *
3310 * No need to exit to userspace if we already have an interrupt queued.
3311 */
3312static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
3313 struct kvm_run *kvm_run)
3314{
8061823a 3315 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
b6c7a5dc 3316 kvm_run->request_interrupt_window &&
5df56646 3317 kvm_arch_interrupt_allowed(vcpu));
b6c7a5dc
HB
3318}
3319
3320static void post_kvm_run_save(struct kvm_vcpu *vcpu,
3321 struct kvm_run *kvm_run)
3322{
3323 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2d3ad1f4 3324 kvm_run->cr8 = kvm_get_cr8(vcpu);
b6c7a5dc 3325 kvm_run->apic_base = kvm_get_apic_base(vcpu);
4531220b 3326 if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 3327 kvm_run->ready_for_interrupt_injection = 1;
4531220b 3328 else
b6c7a5dc 3329 kvm_run->ready_for_interrupt_injection =
fa9726b0
GN
3330 kvm_arch_interrupt_allowed(vcpu) &&
3331 !kvm_cpu_has_interrupt(vcpu) &&
3332 !kvm_event_needs_reinjection(vcpu);
b6c7a5dc
HB
3333}
3334
b93463aa
AK
3335static void vapic_enter(struct kvm_vcpu *vcpu)
3336{
3337 struct kvm_lapic *apic = vcpu->arch.apic;
3338 struct page *page;
3339
3340 if (!apic || !apic->vapic_addr)
3341 return;
3342
3343 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
72dc67a6
IE
3344
3345 vcpu->arch.apic->vapic_page = page;
b93463aa
AK
3346}
3347
3348static void vapic_exit(struct kvm_vcpu *vcpu)
3349{
3350 struct kvm_lapic *apic = vcpu->arch.apic;
3351
3352 if (!apic || !apic->vapic_addr)
3353 return;
3354
f8b78fa3 3355 down_read(&vcpu->kvm->slots_lock);
b93463aa
AK
3356 kvm_release_page_dirty(apic->vapic_page);
3357 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
f8b78fa3 3358 up_read(&vcpu->kvm->slots_lock);
b93463aa
AK
3359}
3360
95ba8273
GN
3361static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3362{
3363 int max_irr, tpr;
3364
3365 if (!kvm_x86_ops->update_cr8_intercept)
3366 return;
3367
8db3baa2
GN
3368 if (!vcpu->arch.apic->vapic_addr)
3369 max_irr = kvm_lapic_find_highest_irr(vcpu);
3370 else
3371 max_irr = -1;
95ba8273
GN
3372
3373 if (max_irr != -1)
3374 max_irr >>= 4;
3375
3376 tpr = kvm_lapic_get_cr8(vcpu);
3377
3378 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3379}
3380
6a8b1d13 3381static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
95ba8273
GN
3382{
3383 /* try to reinject previous events if any */
3384 if (vcpu->arch.nmi_injected) {
3385 kvm_x86_ops->set_nmi(vcpu);
3386 return;
3387 }
3388
3389 if (vcpu->arch.interrupt.pending) {
66fd3f7f 3390 kvm_x86_ops->set_irq(vcpu);
95ba8273
GN
3391 return;
3392 }
3393
3394 /* try to inject new event if pending */
3395 if (vcpu->arch.nmi_pending) {
3396 if (kvm_x86_ops->nmi_allowed(vcpu)) {
3397 vcpu->arch.nmi_pending = false;
3398 vcpu->arch.nmi_injected = true;
3399 kvm_x86_ops->set_nmi(vcpu);
3400 }
3401 } else if (kvm_cpu_has_interrupt(vcpu)) {
3402 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
66fd3f7f
GN
3403 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
3404 false);
3405 kvm_x86_ops->set_irq(vcpu);
95ba8273
GN
3406 }
3407 }
3408}
3409
d7690175 3410static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
b6c7a5dc
HB
3411{
3412 int r;
6a8b1d13
GN
3413 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
3414 kvm_run->request_interrupt_window;
b6c7a5dc 3415
2e53d63a
MT
3416 if (vcpu->requests)
3417 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3418 kvm_mmu_unload(vcpu);
3419
b6c7a5dc
HB
3420 r = kvm_mmu_reload(vcpu);
3421 if (unlikely(r))
3422 goto out;
3423
2f52d58c
AK
3424 if (vcpu->requests) {
3425 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2f599714 3426 __kvm_migrate_timers(vcpu);
c8076604
GH
3427 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3428 kvm_write_guest_time(vcpu);
4731d4c7
MT
3429 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3430 kvm_mmu_sync_roots(vcpu);
d4acf7e7
MT
3431 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3432 kvm_x86_ops->tlb_flush(vcpu);
b93463aa
AK
3433 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3434 &vcpu->requests)) {
3435 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
3436 r = 0;
3437 goto out;
3438 }
71c4dfaf
JR
3439 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3440 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
3441 r = 0;
3442 goto out;
3443 }
2f52d58c 3444 }
b93463aa 3445
b6c7a5dc
HB
3446 preempt_disable();
3447
3448 kvm_x86_ops->prepare_guest_switch(vcpu);
3449 kvm_load_guest_fpu(vcpu);
3450
3451 local_irq_disable();
3452
32f88400
MT
3453 clear_bit(KVM_REQ_KICK, &vcpu->requests);
3454 smp_mb__after_clear_bit();
3455
d7690175 3456 if (vcpu->requests || need_resched() || signal_pending(current)) {
6c142801
AK
3457 local_irq_enable();
3458 preempt_enable();
3459 r = 1;
3460 goto out;
3461 }
3462
ad312c7c 3463 if (vcpu->arch.exception.pending)
298101da 3464 __queue_exception(vcpu);
eb9774f0 3465 else
95ba8273 3466 inject_pending_irq(vcpu, kvm_run);
b6c7a5dc 3467
6a8b1d13
GN
3468 /* enable NMI/IRQ window open exits if needed */
3469 if (vcpu->arch.nmi_pending)
3470 kvm_x86_ops->enable_nmi_window(vcpu);
3471 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
3472 kvm_x86_ops->enable_irq_window(vcpu);
3473
95ba8273 3474 if (kvm_lapic_enabled(vcpu)) {
8db3baa2
GN
3475 update_cr8_intercept(vcpu);
3476 kvm_lapic_sync_to_vapic(vcpu);
95ba8273 3477 }
b93463aa 3478
3200f405
MT
3479 up_read(&vcpu->kvm->slots_lock);
3480
b6c7a5dc
HB
3481 kvm_guest_enter();
3482
42dbaa5a
JK
3483 get_debugreg(vcpu->arch.host_dr6, 6);
3484 get_debugreg(vcpu->arch.host_dr7, 7);
3485 if (unlikely(vcpu->arch.switch_db_regs)) {
3486 get_debugreg(vcpu->arch.host_db[0], 0);
3487 get_debugreg(vcpu->arch.host_db[1], 1);
3488 get_debugreg(vcpu->arch.host_db[2], 2);
3489 get_debugreg(vcpu->arch.host_db[3], 3);
3490
3491 set_debugreg(0, 7);
3492 set_debugreg(vcpu->arch.eff_db[0], 0);
3493 set_debugreg(vcpu->arch.eff_db[1], 1);
3494 set_debugreg(vcpu->arch.eff_db[2], 2);
3495 set_debugreg(vcpu->arch.eff_db[3], 3);
3496 }
b6c7a5dc 3497
2714d1d3 3498 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
b6c7a5dc
HB
3499 kvm_x86_ops->run(vcpu, kvm_run);
3500
42dbaa5a
JK
3501 if (unlikely(vcpu->arch.switch_db_regs)) {
3502 set_debugreg(0, 7);
3503 set_debugreg(vcpu->arch.host_db[0], 0);
3504 set_debugreg(vcpu->arch.host_db[1], 1);
3505 set_debugreg(vcpu->arch.host_db[2], 2);
3506 set_debugreg(vcpu->arch.host_db[3], 3);
3507 }
3508 set_debugreg(vcpu->arch.host_dr6, 6);
3509 set_debugreg(vcpu->arch.host_dr7, 7);
3510
32f88400 3511 set_bit(KVM_REQ_KICK, &vcpu->requests);
b6c7a5dc
HB
3512 local_irq_enable();
3513
3514 ++vcpu->stat.exits;
3515
3516 /*
3517 * We must have an instruction between local_irq_enable() and
3518 * kvm_guest_exit(), so the timer interrupt isn't delayed by
3519 * the interrupt shadow. The stat.exits increment will do nicely.
3520 * But we need to prevent reordering, hence this barrier():
3521 */
3522 barrier();
3523
3524 kvm_guest_exit();
3525
3526 preempt_enable();
3527
3200f405
MT
3528 down_read(&vcpu->kvm->slots_lock);
3529
b6c7a5dc
HB
3530 /*
3531 * Profile KVM exit RIPs:
3532 */
3533 if (unlikely(prof_on == KVM_PROFILING)) {
5fdbf976
MT
3534 unsigned long rip = kvm_rip_read(vcpu);
3535 profile_hit(KVM_PROFILING, (void *)rip);
b6c7a5dc
HB
3536 }
3537
298101da 3538
b93463aa
AK
3539 kvm_lapic_sync_from_vapic(vcpu);
3540
b6c7a5dc 3541 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
d7690175
MT
3542out:
3543 return r;
3544}
b6c7a5dc 3545
09cec754 3546
d7690175
MT
3547static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3548{
3549 int r;
3550
3551 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
1b10bf31
JK
3552 pr_debug("vcpu %d received sipi with vector # %x\n",
3553 vcpu->vcpu_id, vcpu->arch.sipi_vector);
d7690175 3554 kvm_lapic_reset(vcpu);
5f179287 3555 r = kvm_arch_vcpu_reset(vcpu);
d7690175
MT
3556 if (r)
3557 return r;
3558 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
b6c7a5dc
HB
3559 }
3560
d7690175
MT
3561 down_read(&vcpu->kvm->slots_lock);
3562 vapic_enter(vcpu);
3563
3564 r = 1;
3565 while (r > 0) {
af2152f5 3566 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
d7690175
MT
3567 r = vcpu_enter_guest(vcpu, kvm_run);
3568 else {
3569 up_read(&vcpu->kvm->slots_lock);
3570 kvm_vcpu_block(vcpu);
3571 down_read(&vcpu->kvm->slots_lock);
3572 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
09cec754
GN
3573 {
3574 switch(vcpu->arch.mp_state) {
3575 case KVM_MP_STATE_HALTED:
d7690175 3576 vcpu->arch.mp_state =
09cec754
GN
3577 KVM_MP_STATE_RUNNABLE;
3578 case KVM_MP_STATE_RUNNABLE:
3579 break;
3580 case KVM_MP_STATE_SIPI_RECEIVED:
3581 default:
3582 r = -EINTR;
3583 break;
3584 }
3585 }
d7690175
MT
3586 }
3587
09cec754
GN
3588 if (r <= 0)
3589 break;
3590
3591 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3592 if (kvm_cpu_has_pending_timer(vcpu))
3593 kvm_inject_pending_timer_irqs(vcpu);
3594
3595 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3596 r = -EINTR;
3597 kvm_run->exit_reason = KVM_EXIT_INTR;
3598 ++vcpu->stat.request_irq_exits;
3599 }
3600 if (signal_pending(current)) {
3601 r = -EINTR;
3602 kvm_run->exit_reason = KVM_EXIT_INTR;
3603 ++vcpu->stat.signal_exits;
3604 }
3605 if (need_resched()) {
3606 up_read(&vcpu->kvm->slots_lock);
3607 kvm_resched(vcpu);
3608 down_read(&vcpu->kvm->slots_lock);
d7690175 3609 }
b6c7a5dc
HB
3610 }
3611
d7690175 3612 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
3613 post_kvm_run_save(vcpu, kvm_run);
3614
b93463aa
AK
3615 vapic_exit(vcpu);
3616
b6c7a5dc
HB
3617 return r;
3618}
3619
3620int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3621{
3622 int r;
3623 sigset_t sigsaved;
3624
3625 vcpu_load(vcpu);
3626
ac9f6dc0
AK
3627 if (vcpu->sigset_active)
3628 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3629
a4535290 3630 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
b6c7a5dc 3631 kvm_vcpu_block(vcpu);
d7690175 3632 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
ac9f6dc0
AK
3633 r = -EAGAIN;
3634 goto out;
b6c7a5dc
HB
3635 }
3636
b6c7a5dc
HB
3637 /* re-sync apic's tpr */
3638 if (!irqchip_in_kernel(vcpu->kvm))
2d3ad1f4 3639 kvm_set_cr8(vcpu, kvm_run->cr8);
b6c7a5dc 3640
ad312c7c 3641 if (vcpu->arch.pio.cur_count) {
b6c7a5dc
HB
3642 r = complete_pio(vcpu);
3643 if (r)
3644 goto out;
3645 }
3646#if CONFIG_HAS_IOMEM
3647 if (vcpu->mmio_needed) {
3648 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3649 vcpu->mmio_read_completed = 1;
3650 vcpu->mmio_needed = 0;
3200f405
MT
3651
3652 down_read(&vcpu->kvm->slots_lock);
b6c7a5dc 3653 r = emulate_instruction(vcpu, kvm_run,
571008da
SY
3654 vcpu->arch.mmio_fault_cr2, 0,
3655 EMULTYPE_NO_DECODE);
3200f405 3656 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
3657 if (r == EMULATE_DO_MMIO) {
3658 /*
3659 * Read-modify-write. Back to userspace.
3660 */
3661 r = 0;
3662 goto out;
3663 }
3664 }
3665#endif
5fdbf976
MT
3666 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3667 kvm_register_write(vcpu, VCPU_REGS_RAX,
3668 kvm_run->hypercall.ret);
b6c7a5dc
HB
3669
3670 r = __vcpu_run(vcpu, kvm_run);
3671
3672out:
3673 if (vcpu->sigset_active)
3674 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3675
3676 vcpu_put(vcpu);
3677 return r;
3678}
3679
3680int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3681{
3682 vcpu_load(vcpu);
3683
5fdbf976
MT
3684 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3685 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3686 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3687 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3688 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3689 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3690 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3691 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
b6c7a5dc 3692#ifdef CONFIG_X86_64
5fdbf976
MT
3693 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3694 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3695 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3696 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3697 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3698 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3699 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3700 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
b6c7a5dc
HB
3701#endif
3702
5fdbf976 3703 regs->rip = kvm_rip_read(vcpu);
b6c7a5dc
HB
3704 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3705
3706 /*
3707 * Don't leak debug flags in case they were set for guest debugging
3708 */
d0bfb940 3709 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
b6c7a5dc
HB
3710 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3711
3712 vcpu_put(vcpu);
3713
3714 return 0;
3715}
3716
3717int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3718{
3719 vcpu_load(vcpu);
3720
5fdbf976
MT
3721 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
3722 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
3723 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
3724 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
3725 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
3726 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
3727 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
3728 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
b6c7a5dc 3729#ifdef CONFIG_X86_64
5fdbf976
MT
3730 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
3731 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
3732 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
3733 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
3734 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
3735 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
3736 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
3737 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
3738
b6c7a5dc
HB
3739#endif
3740
5fdbf976 3741 kvm_rip_write(vcpu, regs->rip);
b6c7a5dc
HB
3742 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3743
b6c7a5dc 3744
b4f14abd
JK
3745 vcpu->arch.exception.pending = false;
3746
b6c7a5dc
HB
3747 vcpu_put(vcpu);
3748
3749 return 0;
3750}
3751
3e6e0aab
GT
3752void kvm_get_segment(struct kvm_vcpu *vcpu,
3753 struct kvm_segment *var, int seg)
b6c7a5dc 3754{
14af3f3c 3755 kvm_x86_ops->get_segment(vcpu, var, seg);
b6c7a5dc
HB
3756}
3757
3758void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3759{
3760 struct kvm_segment cs;
3761
3e6e0aab 3762 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
b6c7a5dc
HB
3763 *db = cs.db;
3764 *l = cs.l;
3765}
3766EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3767
3768int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3769 struct kvm_sregs *sregs)
3770{
3771 struct descriptor_table dt;
b6c7a5dc
HB
3772
3773 vcpu_load(vcpu);
3774
3e6e0aab
GT
3775 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3776 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3777 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3778 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3779 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3780 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
b6c7a5dc 3781
3e6e0aab
GT
3782 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3783 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
b6c7a5dc
HB
3784
3785 kvm_x86_ops->get_idt(vcpu, &dt);
3786 sregs->idt.limit = dt.limit;
3787 sregs->idt.base = dt.base;
3788 kvm_x86_ops->get_gdt(vcpu, &dt);
3789 sregs->gdt.limit = dt.limit;
3790 sregs->gdt.base = dt.base;
3791
3792 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
ad312c7c
ZX
3793 sregs->cr0 = vcpu->arch.cr0;
3794 sregs->cr2 = vcpu->arch.cr2;
3795 sregs->cr3 = vcpu->arch.cr3;
3796 sregs->cr4 = vcpu->arch.cr4;
2d3ad1f4 3797 sregs->cr8 = kvm_get_cr8(vcpu);
ad312c7c 3798 sregs->efer = vcpu->arch.shadow_efer;
b6c7a5dc
HB
3799 sregs->apic_base = kvm_get_apic_base(vcpu);
3800
923c61bb 3801 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
b6c7a5dc 3802
36752c9b 3803 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
14d0bc1f
GN
3804 set_bit(vcpu->arch.interrupt.nr,
3805 (unsigned long *)sregs->interrupt_bitmap);
16d7a191 3806
b6c7a5dc
HB
3807 vcpu_put(vcpu);
3808
3809 return 0;
3810}
3811
62d9f0db
MT
3812int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3813 struct kvm_mp_state *mp_state)
3814{
3815 vcpu_load(vcpu);
3816 mp_state->mp_state = vcpu->arch.mp_state;
3817 vcpu_put(vcpu);
3818 return 0;
3819}
3820
3821int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3822 struct kvm_mp_state *mp_state)
3823{
3824 vcpu_load(vcpu);
3825 vcpu->arch.mp_state = mp_state->mp_state;
3826 vcpu_put(vcpu);
3827 return 0;
3828}
3829
3e6e0aab 3830static void kvm_set_segment(struct kvm_vcpu *vcpu,
b6c7a5dc
HB
3831 struct kvm_segment *var, int seg)
3832{
14af3f3c 3833 kvm_x86_ops->set_segment(vcpu, var, seg);
b6c7a5dc
HB
3834}
3835
37817f29
IE
3836static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3837 struct kvm_segment *kvm_desct)
3838{
3839 kvm_desct->base = seg_desc->base0;
3840 kvm_desct->base |= seg_desc->base1 << 16;
3841 kvm_desct->base |= seg_desc->base2 << 24;
3842 kvm_desct->limit = seg_desc->limit0;
3843 kvm_desct->limit |= seg_desc->limit << 16;
c93cd3a5
MT
3844 if (seg_desc->g) {
3845 kvm_desct->limit <<= 12;
3846 kvm_desct->limit |= 0xfff;
3847 }
37817f29
IE
3848 kvm_desct->selector = selector;
3849 kvm_desct->type = seg_desc->type;
3850 kvm_desct->present = seg_desc->p;
3851 kvm_desct->dpl = seg_desc->dpl;
3852 kvm_desct->db = seg_desc->d;
3853 kvm_desct->s = seg_desc->s;
3854 kvm_desct->l = seg_desc->l;
3855 kvm_desct->g = seg_desc->g;
3856 kvm_desct->avl = seg_desc->avl;
3857 if (!selector)
3858 kvm_desct->unusable = 1;
3859 else
3860 kvm_desct->unusable = 0;
3861 kvm_desct->padding = 0;
3862}
3863
b8222ad2
AS
3864static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3865 u16 selector,
3866 struct descriptor_table *dtable)
37817f29
IE
3867{
3868 if (selector & 1 << 2) {
3869 struct kvm_segment kvm_seg;
3870
3e6e0aab 3871 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
37817f29
IE
3872
3873 if (kvm_seg.unusable)
3874 dtable->limit = 0;
3875 else
3876 dtable->limit = kvm_seg.limit;
3877 dtable->base = kvm_seg.base;
3878 }
3879 else
3880 kvm_x86_ops->get_gdt(vcpu, dtable);
3881}
3882
3883/* allowed just for 8 bytes segments */
3884static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3885 struct desc_struct *seg_desc)
3886{
98899aa0 3887 gpa_t gpa;
37817f29
IE
3888 struct descriptor_table dtable;
3889 u16 index = selector >> 3;
3890
b8222ad2 3891 get_segment_descriptor_dtable(vcpu, selector, &dtable);
37817f29
IE
3892
3893 if (dtable.limit < index * 8 + 7) {
3894 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3895 return 1;
3896 }
98899aa0
MT
3897 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3898 gpa += index * 8;
3899 return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
37817f29
IE
3900}
3901
3902/* allowed just for 8 bytes segments */
3903static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3904 struct desc_struct *seg_desc)
3905{
98899aa0 3906 gpa_t gpa;
37817f29
IE
3907 struct descriptor_table dtable;
3908 u16 index = selector >> 3;
3909
b8222ad2 3910 get_segment_descriptor_dtable(vcpu, selector, &dtable);
37817f29
IE
3911
3912 if (dtable.limit < index * 8 + 7)
3913 return 1;
98899aa0
MT
3914 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3915 gpa += index * 8;
3916 return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
37817f29
IE
3917}
3918
3919static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3920 struct desc_struct *seg_desc)
3921{
3922 u32 base_addr;
3923
3924 base_addr = seg_desc->base0;
3925 base_addr |= (seg_desc->base1 << 16);
3926 base_addr |= (seg_desc->base2 << 24);
3927
98899aa0 3928 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
37817f29
IE
3929}
3930
37817f29
IE
3931static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3932{
3933 struct kvm_segment kvm_seg;
3934
3e6e0aab 3935 kvm_get_segment(vcpu, &kvm_seg, seg);
37817f29
IE
3936 return kvm_seg.selector;
3937}
3938
3939static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3940 u16 selector,
3941 struct kvm_segment *kvm_seg)
3942{
3943 struct desc_struct seg_desc;
3944
3945 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3946 return 1;
3947 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3948 return 0;
3949}
3950
2259e3a7 3951static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
f4bbd9aa
AK
3952{
3953 struct kvm_segment segvar = {
3954 .base = selector << 4,
3955 .limit = 0xffff,
3956 .selector = selector,
3957 .type = 3,
3958 .present = 1,
3959 .dpl = 3,
3960 .db = 0,
3961 .s = 1,
3962 .l = 0,
3963 .g = 0,
3964 .avl = 0,
3965 .unusable = 0,
3966 };
3967 kvm_x86_ops->set_segment(vcpu, &segvar, seg);
3968 return 0;
3969}
3970
3e6e0aab
GT
3971int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3972 int type_bits, int seg)
37817f29
IE
3973{
3974 struct kvm_segment kvm_seg;
3975
f4bbd9aa
AK
3976 if (!(vcpu->arch.cr0 & X86_CR0_PE))
3977 return kvm_load_realmode_segment(vcpu, selector, seg);
37817f29
IE
3978 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3979 return 1;
3980 kvm_seg.type |= type_bits;
3981
3982 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
3983 seg != VCPU_SREG_LDTR)
3984 if (!kvm_seg.s)
3985 kvm_seg.unusable = 1;
3986
3e6e0aab 3987 kvm_set_segment(vcpu, &kvm_seg, seg);
37817f29
IE
3988 return 0;
3989}
3990
3991static void save_state_to_tss32(struct kvm_vcpu *vcpu,
3992 struct tss_segment_32 *tss)
3993{
3994 tss->cr3 = vcpu->arch.cr3;
5fdbf976 3995 tss->eip = kvm_rip_read(vcpu);
37817f29 3996 tss->eflags = kvm_x86_ops->get_rflags(vcpu);
5fdbf976
MT
3997 tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3998 tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3999 tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4000 tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4001 tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4002 tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4003 tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4004 tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
37817f29
IE
4005 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4006 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4007 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4008 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4009 tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
4010 tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
4011 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
37817f29
IE
4012}
4013
4014static int load_state_from_tss32(struct kvm_vcpu *vcpu,
4015 struct tss_segment_32 *tss)
4016{
4017 kvm_set_cr3(vcpu, tss->cr3);
4018
5fdbf976 4019 kvm_rip_write(vcpu, tss->eip);
37817f29
IE
4020 kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
4021
5fdbf976
MT
4022 kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
4023 kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
4024 kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
4025 kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
4026 kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
4027 kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
4028 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
4029 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
37817f29 4030
3e6e0aab 4031 if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
37817f29
IE
4032 return 1;
4033
3e6e0aab 4034 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
37817f29
IE
4035 return 1;
4036
3e6e0aab 4037 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
37817f29
IE
4038 return 1;
4039
3e6e0aab 4040 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
37817f29
IE
4041 return 1;
4042
3e6e0aab 4043 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
37817f29
IE
4044 return 1;
4045
3e6e0aab 4046 if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
37817f29
IE
4047 return 1;
4048
3e6e0aab 4049 if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
37817f29
IE
4050 return 1;
4051 return 0;
4052}
4053
4054static void save_state_to_tss16(struct kvm_vcpu *vcpu,
4055 struct tss_segment_16 *tss)
4056{
5fdbf976 4057 tss->ip = kvm_rip_read(vcpu);
37817f29 4058 tss->flag = kvm_x86_ops->get_rflags(vcpu);
5fdbf976
MT
4059 tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4060 tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4061 tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4062 tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4063 tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4064 tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4065 tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
4066 tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
37817f29
IE
4067
4068 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4069 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4070 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4071 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4072 tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
4073 tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
4074}
4075
4076static int load_state_from_tss16(struct kvm_vcpu *vcpu,
4077 struct tss_segment_16 *tss)
4078{
5fdbf976 4079 kvm_rip_write(vcpu, tss->ip);
37817f29 4080 kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
5fdbf976
MT
4081 kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
4082 kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
4083 kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
4084 kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
4085 kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
4086 kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
4087 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
4088 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
37817f29 4089
3e6e0aab 4090 if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
37817f29
IE
4091 return 1;
4092
3e6e0aab 4093 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
37817f29
IE
4094 return 1;
4095
3e6e0aab 4096 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
37817f29
IE
4097 return 1;
4098
3e6e0aab 4099 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
37817f29
IE
4100 return 1;
4101
3e6e0aab 4102 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
37817f29
IE
4103 return 1;
4104 return 0;
4105}
4106
8b2cf73c 4107static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
b237ac37
GN
4108 u16 old_tss_sel, u32 old_tss_base,
4109 struct desc_struct *nseg_desc)
37817f29
IE
4110{
4111 struct tss_segment_16 tss_segment_16;
4112 int ret = 0;
4113
34198bf8
MT
4114 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4115 sizeof tss_segment_16))
37817f29
IE
4116 goto out;
4117
4118 save_state_to_tss16(vcpu, &tss_segment_16);
37817f29 4119
34198bf8
MT
4120 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4121 sizeof tss_segment_16))
37817f29 4122 goto out;
34198bf8
MT
4123
4124 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4125 &tss_segment_16, sizeof tss_segment_16))
4126 goto out;
4127
b237ac37
GN
4128 if (old_tss_sel != 0xffff) {
4129 tss_segment_16.prev_task_link = old_tss_sel;
4130
4131 if (kvm_write_guest(vcpu->kvm,
4132 get_tss_base_addr(vcpu, nseg_desc),
4133 &tss_segment_16.prev_task_link,
4134 sizeof tss_segment_16.prev_task_link))
4135 goto out;
4136 }
4137
37817f29
IE
4138 if (load_state_from_tss16(vcpu, &tss_segment_16))
4139 goto out;
4140
4141 ret = 1;
4142out:
4143 return ret;
4144}
4145
8b2cf73c 4146static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
b237ac37 4147 u16 old_tss_sel, u32 old_tss_base,
37817f29
IE
4148 struct desc_struct *nseg_desc)
4149{
4150 struct tss_segment_32 tss_segment_32;
4151 int ret = 0;
4152
34198bf8
MT
4153 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4154 sizeof tss_segment_32))
37817f29
IE
4155 goto out;
4156
4157 save_state_to_tss32(vcpu, &tss_segment_32);
37817f29 4158
34198bf8
MT
4159 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4160 sizeof tss_segment_32))
4161 goto out;
4162
4163 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4164 &tss_segment_32, sizeof tss_segment_32))
37817f29 4165 goto out;
34198bf8 4166
b237ac37
GN
4167 if (old_tss_sel != 0xffff) {
4168 tss_segment_32.prev_task_link = old_tss_sel;
4169
4170 if (kvm_write_guest(vcpu->kvm,
4171 get_tss_base_addr(vcpu, nseg_desc),
4172 &tss_segment_32.prev_task_link,
4173 sizeof tss_segment_32.prev_task_link))
4174 goto out;
4175 }
4176
37817f29
IE
4177 if (load_state_from_tss32(vcpu, &tss_segment_32))
4178 goto out;
4179
4180 ret = 1;
4181out:
4182 return ret;
4183}
4184
4185int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
4186{
4187 struct kvm_segment tr_seg;
4188 struct desc_struct cseg_desc;
4189 struct desc_struct nseg_desc;
4190 int ret = 0;
34198bf8
MT
4191 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
4192 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
37817f29 4193
34198bf8 4194 old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
37817f29 4195
34198bf8
MT
4196 /* FIXME: Handle errors. Failure to read either TSS or their
4197 * descriptors should generate a pagefault.
4198 */
37817f29
IE
4199 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
4200 goto out;
4201
34198bf8 4202 if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
37817f29
IE
4203 goto out;
4204
37817f29
IE
4205 if (reason != TASK_SWITCH_IRET) {
4206 int cpl;
4207
4208 cpl = kvm_x86_ops->get_cpl(vcpu);
4209 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
4210 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4211 return 1;
4212 }
4213 }
4214
4215 if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
4216 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
4217 return 1;
4218 }
4219
4220 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3fe913e7 4221 cseg_desc.type &= ~(1 << 1); //clear the B flag
34198bf8 4222 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
37817f29
IE
4223 }
4224
4225 if (reason == TASK_SWITCH_IRET) {
4226 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
4227 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
4228 }
4229
64a7ec06
GN
4230 /* set back link to prev task only if NT bit is set in eflags
4231 note that old_tss_sel is not used afetr this point */
4232 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
4233 old_tss_sel = 0xffff;
37817f29 4234
b237ac37
GN
4235 /* set back link to prev task only if NT bit is set in eflags
4236 note that old_tss_sel is not used afetr this point */
4237 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
4238 old_tss_sel = 0xffff;
4239
37817f29 4240 if (nseg_desc.type & 8)
b237ac37
GN
4241 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
4242 old_tss_base, &nseg_desc);
37817f29 4243 else
b237ac37
GN
4244 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
4245 old_tss_base, &nseg_desc);
37817f29
IE
4246
4247 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
4248 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
4249 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
4250 }
4251
4252 if (reason != TASK_SWITCH_IRET) {
3fe913e7 4253 nseg_desc.type |= (1 << 1);
37817f29
IE
4254 save_guest_segment_descriptor(vcpu, tss_selector,
4255 &nseg_desc);
4256 }
4257
4258 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
4259 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
4260 tr_seg.type = 11;
3e6e0aab 4261 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
37817f29 4262out:
37817f29
IE
4263 return ret;
4264}
4265EXPORT_SYMBOL_GPL(kvm_task_switch);
4266
b6c7a5dc
HB
4267int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4268 struct kvm_sregs *sregs)
4269{
4270 int mmu_reset_needed = 0;
923c61bb 4271 int pending_vec, max_bits;
b6c7a5dc
HB
4272 struct descriptor_table dt;
4273
4274 vcpu_load(vcpu);
4275
4276 dt.limit = sregs->idt.limit;
4277 dt.base = sregs->idt.base;
4278 kvm_x86_ops->set_idt(vcpu, &dt);
4279 dt.limit = sregs->gdt.limit;
4280 dt.base = sregs->gdt.base;
4281 kvm_x86_ops->set_gdt(vcpu, &dt);
4282
ad312c7c
ZX
4283 vcpu->arch.cr2 = sregs->cr2;
4284 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
59839dff
MT
4285
4286 down_read(&vcpu->kvm->slots_lock);
4287 if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
4288 vcpu->arch.cr3 = sregs->cr3;
4289 else
4290 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
4291 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc 4292
2d3ad1f4 4293 kvm_set_cr8(vcpu, sregs->cr8);
b6c7a5dc 4294
ad312c7c 4295 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
b6c7a5dc 4296 kvm_x86_ops->set_efer(vcpu, sregs->efer);
b6c7a5dc
HB
4297 kvm_set_apic_base(vcpu, sregs->apic_base);
4298
4299 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
4300
ad312c7c 4301 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
b6c7a5dc 4302 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
d7306163 4303 vcpu->arch.cr0 = sregs->cr0;
b6c7a5dc 4304
ad312c7c 4305 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
b6c7a5dc
HB
4306 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
4307 if (!is_long_mode(vcpu) && is_pae(vcpu))
ad312c7c 4308 load_pdptrs(vcpu, vcpu->arch.cr3);
b6c7a5dc
HB
4309
4310 if (mmu_reset_needed)
4311 kvm_mmu_reset_context(vcpu);
4312
923c61bb
GN
4313 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
4314 pending_vec = find_first_bit(
4315 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
4316 if (pending_vec < max_bits) {
66fd3f7f 4317 kvm_queue_interrupt(vcpu, pending_vec, false);
923c61bb
GN
4318 pr_debug("Set back pending irq %d\n", pending_vec);
4319 if (irqchip_in_kernel(vcpu->kvm))
4320 kvm_pic_clear_isr_ack(vcpu->kvm);
b6c7a5dc
HB
4321 }
4322
3e6e0aab
GT
4323 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4324 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4325 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4326 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
4327 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
4328 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
b6c7a5dc 4329
3e6e0aab
GT
4330 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4331 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
b6c7a5dc 4332
9c3e4aab
MT
4333 /* Older userspace won't unhalt the vcpu on reset. */
4334 if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
4335 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
4336 !(vcpu->arch.cr0 & X86_CR0_PE))
4337 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4338
b6c7a5dc
HB
4339 vcpu_put(vcpu);
4340
4341 return 0;
4342}
4343
d0bfb940
JK
4344int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4345 struct kvm_guest_debug *dbg)
b6c7a5dc 4346{
ae675ef0 4347 int i, r;
b6c7a5dc
HB
4348
4349 vcpu_load(vcpu);
4350
ae675ef0
JK
4351 if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
4352 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
4353 for (i = 0; i < KVM_NR_DB_REGS; ++i)
4354 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
4355 vcpu->arch.switch_db_regs =
4356 (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
4357 } else {
4358 for (i = 0; i < KVM_NR_DB_REGS; i++)
4359 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
4360 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
4361 }
4362
b6c7a5dc
HB
4363 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
4364
d0bfb940
JK
4365 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
4366 kvm_queue_exception(vcpu, DB_VECTOR);
4367 else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
4368 kvm_queue_exception(vcpu, BP_VECTOR);
4369
b6c7a5dc
HB
4370 vcpu_put(vcpu);
4371
4372 return r;
4373}
4374
d0752060
HB
4375/*
4376 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
4377 * we have asm/x86/processor.h
4378 */
4379struct fxsave {
4380 u16 cwd;
4381 u16 swd;
4382 u16 twd;
4383 u16 fop;
4384 u64 rip;
4385 u64 rdp;
4386 u32 mxcsr;
4387 u32 mxcsr_mask;
4388 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
4389#ifdef CONFIG_X86_64
4390 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
4391#else
4392 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
4393#endif
4394};
4395
8b006791
ZX
4396/*
4397 * Translate a guest virtual address to a guest physical address.
4398 */
4399int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4400 struct kvm_translation *tr)
4401{
4402 unsigned long vaddr = tr->linear_address;
4403 gpa_t gpa;
4404
4405 vcpu_load(vcpu);
72dc67a6 4406 down_read(&vcpu->kvm->slots_lock);
ad312c7c 4407 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
72dc67a6 4408 up_read(&vcpu->kvm->slots_lock);
8b006791
ZX
4409 tr->physical_address = gpa;
4410 tr->valid = gpa != UNMAPPED_GVA;
4411 tr->writeable = 1;
4412 tr->usermode = 0;
8b006791
ZX
4413 vcpu_put(vcpu);
4414
4415 return 0;
4416}
4417
d0752060
HB
4418int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4419{
ad312c7c 4420 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
4421
4422 vcpu_load(vcpu);
4423
4424 memcpy(fpu->fpr, fxsave->st_space, 128);
4425 fpu->fcw = fxsave->cwd;
4426 fpu->fsw = fxsave->swd;
4427 fpu->ftwx = fxsave->twd;
4428 fpu->last_opcode = fxsave->fop;
4429 fpu->last_ip = fxsave->rip;
4430 fpu->last_dp = fxsave->rdp;
4431 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
4432
4433 vcpu_put(vcpu);
4434
4435 return 0;
4436}
4437
4438int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4439{
ad312c7c 4440 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
4441
4442 vcpu_load(vcpu);
4443
4444 memcpy(fxsave->st_space, fpu->fpr, 128);
4445 fxsave->cwd = fpu->fcw;
4446 fxsave->swd = fpu->fsw;
4447 fxsave->twd = fpu->ftwx;
4448 fxsave->fop = fpu->last_opcode;
4449 fxsave->rip = fpu->last_ip;
4450 fxsave->rdp = fpu->last_dp;
4451 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
4452
4453 vcpu_put(vcpu);
4454
4455 return 0;
4456}
4457
4458void fx_init(struct kvm_vcpu *vcpu)
4459{
4460 unsigned after_mxcsr_mask;
4461
bc1a34f1
AA
4462 /*
4463 * Touch the fpu the first time in non atomic context as if
4464 * this is the first fpu instruction the exception handler
4465 * will fire before the instruction returns and it'll have to
4466 * allocate ram with GFP_KERNEL.
4467 */
4468 if (!used_math())
d6e88aec 4469 kvm_fx_save(&vcpu->arch.host_fx_image);
bc1a34f1 4470
d0752060
HB
4471 /* Initialize guest FPU by resetting ours and saving into guest's */
4472 preempt_disable();
d6e88aec
AK
4473 kvm_fx_save(&vcpu->arch.host_fx_image);
4474 kvm_fx_finit();
4475 kvm_fx_save(&vcpu->arch.guest_fx_image);
4476 kvm_fx_restore(&vcpu->arch.host_fx_image);
d0752060
HB
4477 preempt_enable();
4478
ad312c7c 4479 vcpu->arch.cr0 |= X86_CR0_ET;
d0752060 4480 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
ad312c7c
ZX
4481 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
4482 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
d0752060
HB
4483 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
4484}
4485EXPORT_SYMBOL_GPL(fx_init);
4486
4487void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
4488{
4489 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
4490 return;
4491
4492 vcpu->guest_fpu_loaded = 1;
d6e88aec
AK
4493 kvm_fx_save(&vcpu->arch.host_fx_image);
4494 kvm_fx_restore(&vcpu->arch.guest_fx_image);
d0752060
HB
4495}
4496EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
4497
4498void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
4499{
4500 if (!vcpu->guest_fpu_loaded)
4501 return;
4502
4503 vcpu->guest_fpu_loaded = 0;
d6e88aec
AK
4504 kvm_fx_save(&vcpu->arch.guest_fx_image);
4505 kvm_fx_restore(&vcpu->arch.host_fx_image);
f096ed85 4506 ++vcpu->stat.fpu_reload;
d0752060
HB
4507}
4508EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
e9b11c17
ZX
4509
4510void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
4511{
7f1ea208
JR
4512 if (vcpu->arch.time_page) {
4513 kvm_release_page_dirty(vcpu->arch.time_page);
4514 vcpu->arch.time_page = NULL;
4515 }
4516
e9b11c17
ZX
4517 kvm_x86_ops->vcpu_free(vcpu);
4518}
4519
4520struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
4521 unsigned int id)
4522{
26e5215f
AK
4523 return kvm_x86_ops->vcpu_create(kvm, id);
4524}
e9b11c17 4525
26e5215f
AK
4526int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
4527{
4528 int r;
e9b11c17
ZX
4529
4530 /* We do fxsave: this must be aligned. */
ad312c7c 4531 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
e9b11c17 4532
0bed3b56 4533 vcpu->arch.mtrr_state.have_fixed = 1;
e9b11c17
ZX
4534 vcpu_load(vcpu);
4535 r = kvm_arch_vcpu_reset(vcpu);
4536 if (r == 0)
4537 r = kvm_mmu_setup(vcpu);
4538 vcpu_put(vcpu);
4539 if (r < 0)
4540 goto free_vcpu;
4541
26e5215f 4542 return 0;
e9b11c17
ZX
4543free_vcpu:
4544 kvm_x86_ops->vcpu_free(vcpu);
26e5215f 4545 return r;
e9b11c17
ZX
4546}
4547
d40ccc62 4548void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
e9b11c17
ZX
4549{
4550 vcpu_load(vcpu);
4551 kvm_mmu_unload(vcpu);
4552 vcpu_put(vcpu);
4553
4554 kvm_x86_ops->vcpu_free(vcpu);
4555}
4556
4557int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
4558{
448fa4a9
JK
4559 vcpu->arch.nmi_pending = false;
4560 vcpu->arch.nmi_injected = false;
4561
42dbaa5a
JK
4562 vcpu->arch.switch_db_regs = 0;
4563 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
4564 vcpu->arch.dr6 = DR6_FIXED_1;
4565 vcpu->arch.dr7 = DR7_FIXED_1;
4566
e9b11c17
ZX
4567 return kvm_x86_ops->vcpu_reset(vcpu);
4568}
4569
4570void kvm_arch_hardware_enable(void *garbage)
4571{
4572 kvm_x86_ops->hardware_enable(garbage);
4573}
4574
4575void kvm_arch_hardware_disable(void *garbage)
4576{
4577 kvm_x86_ops->hardware_disable(garbage);
4578}
4579
4580int kvm_arch_hardware_setup(void)
4581{
4582 return kvm_x86_ops->hardware_setup();
4583}
4584
4585void kvm_arch_hardware_unsetup(void)
4586{
4587 kvm_x86_ops->hardware_unsetup();
4588}
4589
4590void kvm_arch_check_processor_compat(void *rtn)
4591{
4592 kvm_x86_ops->check_processor_compatibility(rtn);
4593}
4594
4595int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
4596{
4597 struct page *page;
4598 struct kvm *kvm;
4599 int r;
4600
4601 BUG_ON(vcpu->kvm == NULL);
4602 kvm = vcpu->kvm;
4603
ad312c7c 4604 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
e9b11c17 4605 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
a4535290 4606 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
e9b11c17 4607 else
a4535290 4608 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
e9b11c17
ZX
4609
4610 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
4611 if (!page) {
4612 r = -ENOMEM;
4613 goto fail;
4614 }
ad312c7c 4615 vcpu->arch.pio_data = page_address(page);
e9b11c17
ZX
4616
4617 r = kvm_mmu_create(vcpu);
4618 if (r < 0)
4619 goto fail_free_pio_data;
4620
4621 if (irqchip_in_kernel(kvm)) {
4622 r = kvm_create_lapic(vcpu);
4623 if (r < 0)
4624 goto fail_mmu_destroy;
4625 }
4626
890ca9ae
HY
4627 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
4628 GFP_KERNEL);
4629 if (!vcpu->arch.mce_banks) {
4630 r = -ENOMEM;
4631 goto fail_mmu_destroy;
4632 }
4633 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
4634
e9b11c17
ZX
4635 return 0;
4636
4637fail_mmu_destroy:
4638 kvm_mmu_destroy(vcpu);
4639fail_free_pio_data:
ad312c7c 4640 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17
ZX
4641fail:
4642 return r;
4643}
4644
4645void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
4646{
4647 kvm_free_lapic(vcpu);
3200f405 4648 down_read(&vcpu->kvm->slots_lock);
e9b11c17 4649 kvm_mmu_destroy(vcpu);
3200f405 4650 up_read(&vcpu->kvm->slots_lock);
ad312c7c 4651 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17 4652}
d19a9cd2
ZX
4653
4654struct kvm *kvm_arch_create_vm(void)
4655{
4656 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
4657
4658 if (!kvm)
4659 return ERR_PTR(-ENOMEM);
4660
f05e70ac 4661 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
4d5c5d0f 4662 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
d19a9cd2 4663
5550af4d
SY
4664 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
4665 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
4666
53f658b3
MT
4667 rdtscll(kvm->arch.vm_init_tsc);
4668
d19a9cd2
ZX
4669 return kvm;
4670}
4671
4672static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
4673{
4674 vcpu_load(vcpu);
4675 kvm_mmu_unload(vcpu);
4676 vcpu_put(vcpu);
4677}
4678
4679static void kvm_free_vcpus(struct kvm *kvm)
4680{
4681 unsigned int i;
4682
4683 /*
4684 * Unpin any mmu pages first.
4685 */
4686 for (i = 0; i < KVM_MAX_VCPUS; ++i)
4687 if (kvm->vcpus[i])
4688 kvm_unload_vcpu_mmu(kvm->vcpus[i]);
4689 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
4690 if (kvm->vcpus[i]) {
4691 kvm_arch_vcpu_free(kvm->vcpus[i]);
4692 kvm->vcpus[i] = NULL;
4693 }
4694 }
4695
4696}
4697
ad8ba2cd
SY
4698void kvm_arch_sync_events(struct kvm *kvm)
4699{
ba4cef31 4700 kvm_free_all_assigned_devices(kvm);
ad8ba2cd
SY
4701}
4702
d19a9cd2
ZX
4703void kvm_arch_destroy_vm(struct kvm *kvm)
4704{
6eb55818 4705 kvm_iommu_unmap_guest(kvm);
7837699f 4706 kvm_free_pit(kvm);
d7deeeb0
ZX
4707 kfree(kvm->arch.vpic);
4708 kfree(kvm->arch.vioapic);
d19a9cd2
ZX
4709 kvm_free_vcpus(kvm);
4710 kvm_free_physmem(kvm);
3d45830c
AK
4711 if (kvm->arch.apic_access_page)
4712 put_page(kvm->arch.apic_access_page);
b7ebfb05
SY
4713 if (kvm->arch.ept_identity_pagetable)
4714 put_page(kvm->arch.ept_identity_pagetable);
d19a9cd2
ZX
4715 kfree(kvm);
4716}
0de10343
ZX
4717
4718int kvm_arch_set_memory_region(struct kvm *kvm,
4719 struct kvm_userspace_memory_region *mem,
4720 struct kvm_memory_slot old,
4721 int user_alloc)
4722{
4723 int npages = mem->memory_size >> PAGE_SHIFT;
4724 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
4725
4726 /*To keep backward compatibility with older userspace,
4727 *x86 needs to hanlde !user_alloc case.
4728 */
4729 if (!user_alloc) {
4730 if (npages && !old.rmap) {
604b38ac
AA
4731 unsigned long userspace_addr;
4732
72dc67a6 4733 down_write(&current->mm->mmap_sem);
604b38ac
AA
4734 userspace_addr = do_mmap(NULL, 0,
4735 npages * PAGE_SIZE,
4736 PROT_READ | PROT_WRITE,
acee3c04 4737 MAP_PRIVATE | MAP_ANONYMOUS,
604b38ac 4738 0);
72dc67a6 4739 up_write(&current->mm->mmap_sem);
0de10343 4740
604b38ac
AA
4741 if (IS_ERR((void *)userspace_addr))
4742 return PTR_ERR((void *)userspace_addr);
4743
4744 /* set userspace_addr atomically for kvm_hva_to_rmapp */
4745 spin_lock(&kvm->mmu_lock);
4746 memslot->userspace_addr = userspace_addr;
4747 spin_unlock(&kvm->mmu_lock);
0de10343
ZX
4748 } else {
4749 if (!old.user_alloc && old.rmap) {
4750 int ret;
4751
72dc67a6 4752 down_write(&current->mm->mmap_sem);
0de10343
ZX
4753 ret = do_munmap(current->mm, old.userspace_addr,
4754 old.npages * PAGE_SIZE);
72dc67a6 4755 up_write(&current->mm->mmap_sem);
0de10343
ZX
4756 if (ret < 0)
4757 printk(KERN_WARNING
4758 "kvm_vm_ioctl_set_memory_region: "
4759 "failed to munmap memory\n");
4760 }
4761 }
4762 }
4763
7c8a83b7 4764 spin_lock(&kvm->mmu_lock);
f05e70ac 4765 if (!kvm->arch.n_requested_mmu_pages) {
0de10343
ZX
4766 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
4767 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
4768 }
4769
4770 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
7c8a83b7 4771 spin_unlock(&kvm->mmu_lock);
0de10343
ZX
4772 kvm_flush_remote_tlbs(kvm);
4773
4774 return 0;
4775}
1d737c8a 4776
34d4cb8f
MT
4777void kvm_arch_flush_shadow(struct kvm *kvm)
4778{
4779 kvm_mmu_zap_all(kvm);
8986ecc0 4780 kvm_reload_remote_mmus(kvm);
34d4cb8f
MT
4781}
4782
1d737c8a
ZX
4783int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4784{
a4535290 4785 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
0496fbb9
JK
4786 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
4787 || vcpu->arch.nmi_pending;
1d737c8a 4788}
5736199a 4789
5736199a
ZX
4790void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
4791{
32f88400
MT
4792 int me;
4793 int cpu = vcpu->cpu;
5736199a
ZX
4794
4795 if (waitqueue_active(&vcpu->wq)) {
4796 wake_up_interruptible(&vcpu->wq);
4797 ++vcpu->stat.halt_wakeup;
4798 }
32f88400
MT
4799
4800 me = get_cpu();
4801 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
4802 if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
4803 smp_send_reschedule(cpu);
e9571ed5 4804 put_cpu();
5736199a 4805}
78646121
GN
4806
4807int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
4808{
4809 return kvm_x86_ops->interrupt_allowed(vcpu);
4810}