KVM: x86: move steal time initialization to vcpu entry time
arch/x86/kvm/x86.c
1 /*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * derived from drivers/kvm/kvm_main.c
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
7 * Copyright (C) 2008 Qumranet, Inc.
8 * Copyright IBM Corporation, 2008
9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 *
11 * Authors:
12 * Avi Kivity <avi@qumranet.com>
13 * Yaniv Kamay <yaniv@qumranet.com>
14 * Amit Shah <amit.shah@qumranet.com>
15 * Ben-Ami Yassour <benami@il.ibm.com>
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
19 *
20 */
21
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29 #include "cpuid.h"
30 #include "assigned-dev.h"
31 #include "pmu.h"
32 #include "hyperv.h"
33
34 #include <linux/clocksource.h>
35 #include <linux/interrupt.h>
36 #include <linux/kvm.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/module.h>
40 #include <linux/mman.h>
41 #include <linux/highmem.h>
42 #include <linux/iommu.h>
43 #include <linux/intel-iommu.h>
44 #include <linux/cpufreq.h>
45 #include <linux/user-return-notifier.h>
46 #include <linux/srcu.h>
47 #include <linux/slab.h>
48 #include <linux/perf_event.h>
49 #include <linux/uaccess.h>
50 #include <linux/hash.h>
51 #include <linux/pci.h>
52 #include <linux/timekeeper_internal.h>
53 #include <linux/pvclock_gtod.h>
54 #include <linux/kvm_irqfd.h>
55 #include <linux/irqbypass.h>
56 #include <trace/events/kvm.h>
57
58 #define CREATE_TRACE_POINTS
59 #include "trace.h"
60
61 #include <asm/debugreg.h>
62 #include <asm/msr.h>
63 #include <asm/desc.h>
64 #include <asm/mce.h>
65 #include <linux/kernel_stat.h>
66 #include <asm/fpu/internal.h> /* Ugh! */
67 #include <asm/pvclock.h>
68 #include <asm/div64.h>
69 #include <asm/irq_remapping.h>
70
71 #define MAX_IO_MSRS 256
72 #define KVM_MAX_MCE_BANKS 32
73 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
74
75 #define emul_to_vcpu(ctxt) \
76 container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
77
78 /* EFER defaults:
79  *  - enable SYSCALL by default because it is emulated by KVM
80  *  - enable LME and LMA by default on 64-bit KVM
81 */
82 #ifdef CONFIG_X86_64
83 static
84 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
85 #else
86 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
87 #endif
88
89 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
90 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
91
92 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
93 static void process_nmi(struct kvm_vcpu *vcpu);
94 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
95
96 struct kvm_x86_ops *kvm_x86_ops;
97 EXPORT_SYMBOL_GPL(kvm_x86_ops);
98
99 static bool ignore_msrs = false;
100 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
101
102 unsigned int min_timer_period_us = 500;
103 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
104
105 static bool __read_mostly kvmclock_periodic_sync = true;
106 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
107
108 bool kvm_has_tsc_control;
109 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
110 u32 kvm_max_guest_tsc_khz;
111 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
112
113 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
114 static u32 tsc_tolerance_ppm = 250;
115 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
116
117 /* lapic timer advance (tscdeadline mode only) in nanoseconds */
118 unsigned int lapic_timer_advance_ns = 0;
119 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
120
121 static bool backwards_tsc_observed = false;
122
123 #define KVM_NR_SHARED_MSRS 16
124
125 struct kvm_shared_msrs_global {
126 int nr;
127 u32 msrs[KVM_NR_SHARED_MSRS];
128 };
129
130 struct kvm_shared_msrs {
131 struct user_return_notifier urn;
132 bool registered;
133 struct kvm_shared_msr_values {
134 u64 host;
135 u64 curr;
136 } values[KVM_NR_SHARED_MSRS];
137 };
138
139 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
140 static struct kvm_shared_msrs __percpu *shared_msrs;
141
142 struct kvm_stats_debugfs_item debugfs_entries[] = {
143 { "pf_fixed", VCPU_STAT(pf_fixed) },
144 { "pf_guest", VCPU_STAT(pf_guest) },
145 { "tlb_flush", VCPU_STAT(tlb_flush) },
146 { "invlpg", VCPU_STAT(invlpg) },
147 { "exits", VCPU_STAT(exits) },
148 { "io_exits", VCPU_STAT(io_exits) },
149 { "mmio_exits", VCPU_STAT(mmio_exits) },
150 { "signal_exits", VCPU_STAT(signal_exits) },
151 { "irq_window", VCPU_STAT(irq_window_exits) },
152 { "nmi_window", VCPU_STAT(nmi_window_exits) },
153 { "halt_exits", VCPU_STAT(halt_exits) },
154 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
155 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
156 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
157 { "hypercalls", VCPU_STAT(hypercalls) },
158 { "request_irq", VCPU_STAT(request_irq_exits) },
159 { "irq_exits", VCPU_STAT(irq_exits) },
160 { "host_state_reload", VCPU_STAT(host_state_reload) },
161 { "efer_reload", VCPU_STAT(efer_reload) },
162 { "fpu_reload", VCPU_STAT(fpu_reload) },
163 { "insn_emulation", VCPU_STAT(insn_emulation) },
164 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
165 { "irq_injections", VCPU_STAT(irq_injections) },
166 { "nmi_injections", VCPU_STAT(nmi_injections) },
167 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
168 { "mmu_pte_write", VM_STAT(mmu_pte_write) },
169 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
170 { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
171 { "mmu_flooded", VM_STAT(mmu_flooded) },
172 { "mmu_recycled", VM_STAT(mmu_recycled) },
173 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
174 { "mmu_unsync", VM_STAT(mmu_unsync) },
175 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
176 { "largepages", VM_STAT(lpages) },
177 { NULL }
178 };
179
180 u64 __read_mostly host_xcr0;
181
182 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
183
184 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
185 {
186 int i;
187 for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
188 vcpu->arch.apf.gfns[i] = ~0;
189 }
190
191 static void kvm_on_user_return(struct user_return_notifier *urn)
192 {
193 unsigned slot;
194 struct kvm_shared_msrs *locals
195 = container_of(urn, struct kvm_shared_msrs, urn);
196 struct kvm_shared_msr_values *values;
197
198 for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
199 values = &locals->values[slot];
200 if (values->host != values->curr) {
201 wrmsrl(shared_msrs_global.msrs[slot], values->host);
202 values->curr = values->host;
203 }
204 }
205 locals->registered = false;
206 user_return_notifier_unregister(urn);
207 }
208
209 static void shared_msr_update(unsigned slot, u32 msr)
210 {
211 u64 value;
212 unsigned int cpu = smp_processor_id();
213 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
214
215 	/* Only read; nobody should modify it at this time,
216 	 * so no lock is needed. */
217 if (slot >= shared_msrs_global.nr) {
218 printk(KERN_ERR "kvm: invalid MSR slot!");
219 return;
220 }
221 rdmsrl_safe(msr, &value);
222 smsr->values[slot].host = value;
223 smsr->values[slot].curr = value;
224 }
225
226 void kvm_define_shared_msr(unsigned slot, u32 msr)
227 {
228 BUG_ON(slot >= KVM_NR_SHARED_MSRS);
229 shared_msrs_global.msrs[slot] = msr;
230 if (slot >= shared_msrs_global.nr)
231 shared_msrs_global.nr = slot + 1;
232 }
233 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
234
235 static void kvm_shared_msr_cpu_online(void)
236 {
237 unsigned i;
238
239 for (i = 0; i < shared_msrs_global.nr; ++i)
240 shared_msr_update(i, shared_msrs_global.msrs[i]);
241 }
242
243 int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
244 {
245 unsigned int cpu = smp_processor_id();
246 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
247 int err;
248
249 if (((value ^ smsr->values[slot].curr) & mask) == 0)
250 return 0;
251 smsr->values[slot].curr = value;
252 err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
253 if (err)
254 return 1;
255
256 if (!smsr->registered) {
257 smsr->urn.on_user_return = kvm_on_user_return;
258 user_return_notifier_register(&smsr->urn);
259 smsr->registered = true;
260 }
261 return 0;
262 }
263 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
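
/*
 * A minimal usage sketch of the shared-MSR machinery above (slot 0 and
 * MSR_STAR are illustrative choices, not taken from a particular vendor
 * module): the vendor module registers the MSRs it wants switched lazily,
 * then installs guest values when they are loaded; the user-return
 * notifier restores the host values only when the CPU actually returns
 * to userspace, avoiding a wrmsr on every exit.
 *
 *	kvm_define_shared_msr(0, MSR_STAR);		// once, at init
 *	...
 *	kvm_set_shared_msr(0, guest_star, ~0ull);	// when loading guest state
 */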
264
265 static void drop_user_return_notifiers(void)
266 {
267 unsigned int cpu = smp_processor_id();
268 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
269
270 if (smsr->registered)
271 kvm_on_user_return(&smsr->urn);
272 }
273
274 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
275 {
276 return vcpu->arch.apic_base;
277 }
278 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
279
280 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
281 {
282 u64 old_state = vcpu->arch.apic_base &
283 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
284 u64 new_state = msr_info->data &
285 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
286 u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
287 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
288
289 if (!msr_info->host_initiated &&
290 ((msr_info->data & reserved_bits) != 0 ||
291 new_state == X2APIC_ENABLE ||
292 (new_state == MSR_IA32_APICBASE_ENABLE &&
293 old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
294 (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
295 old_state == 0)))
296 return 1;
297
298 kvm_lapic_set_base(vcpu, msr_info->data);
299 return 0;
300 }
301 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
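
/*
 * A summary of what the state check above accepts, derived from the
 * condition itself (EN = MSR_IA32_APICBASE_ENABLE, EXTD = X2APIC_ENABLE):
 *
 *	disabled -> xAPIC	ok
 *	xAPIC    -> x2APIC	ok
 *	x2APIC   -> disabled	ok
 *	x2APIC   -> xAPIC	rejected (must disable first)
 *	disabled -> x2APIC	rejected (must enable xAPIC first)
 *	EXTD set without EN	rejected (invalid combination)
 */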
302
303 asmlinkage __visible void kvm_spurious_fault(void)
304 {
305 /* Fault while not rebooting. We want the trace. */
306 BUG();
307 }
308 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
309
310 #define EXCPT_BENIGN 0
311 #define EXCPT_CONTRIBUTORY 1
312 #define EXCPT_PF 2
313
314 static int exception_class(int vector)
315 {
316 switch (vector) {
317 case PF_VECTOR:
318 return EXCPT_PF;
319 case DE_VECTOR:
320 case TS_VECTOR:
321 case NP_VECTOR:
322 case SS_VECTOR:
323 case GP_VECTOR:
324 return EXCPT_CONTRIBUTORY;
325 default:
326 break;
327 }
328 return EXCPT_BENIGN;
329 }
330
331 #define EXCPT_FAULT 0
332 #define EXCPT_TRAP 1
333 #define EXCPT_ABORT 2
334 #define EXCPT_INTERRUPT 3
335
336 static int exception_type(int vector)
337 {
338 unsigned int mask;
339
340 if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
341 return EXCPT_INTERRUPT;
342
343 mask = 1 << vector;
344
345 /* #DB is trap, as instruction watchpoints are handled elsewhere */
346 if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
347 return EXCPT_TRAP;
348
349 if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
350 return EXCPT_ABORT;
351
352 /* Reserved exceptions will result in fault */
353 return EXCPT_FAULT;
354 }
355
356 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
357 unsigned nr, bool has_error, u32 error_code,
358 bool reinject)
359 {
360 u32 prev_nr;
361 int class1, class2;
362
363 kvm_make_request(KVM_REQ_EVENT, vcpu);
364
365 if (!vcpu->arch.exception.pending) {
366 queue:
367 if (has_error && !is_protmode(vcpu))
368 has_error = false;
369 vcpu->arch.exception.pending = true;
370 vcpu->arch.exception.has_error_code = has_error;
371 vcpu->arch.exception.nr = nr;
372 vcpu->arch.exception.error_code = error_code;
373 vcpu->arch.exception.reinject = reinject;
374 return;
375 }
376
377 	/* Decide how the new exception combines with the pending one. */
378 prev_nr = vcpu->arch.exception.nr;
379 if (prev_nr == DF_VECTOR) {
380 /* triple fault -> shutdown */
381 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
382 return;
383 }
384 class1 = exception_class(prev_nr);
385 class2 = exception_class(nr);
386 if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
387 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
388 /* generate double fault per SDM Table 5-5 */
389 vcpu->arch.exception.pending = true;
390 vcpu->arch.exception.has_error_code = true;
391 vcpu->arch.exception.nr = DF_VECTOR;
392 vcpu->arch.exception.error_code = 0;
393 } else
394 		/* replace previous exception with a new one in the hope
395 		   that instruction re-execution will regenerate the lost
396 		   exception */
397 goto queue;
398 }
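
/*
 * Worked examples of the combination rules above (SDM Table 5-5):
 * pending #GP + new #GP (contributory + contributory) escalates to #DF;
 * pending #PF + any new non-benign exception escalates to #DF; pending
 * #GP + new #PF does not escalate -- the #PF takes the "queue" path and
 * replaces the #GP; and a new exception while #DF is pending requests a
 * triple fault (VM shutdown).
 */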
399
400 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
401 {
402 kvm_multiple_exception(vcpu, nr, false, 0, false);
403 }
404 EXPORT_SYMBOL_GPL(kvm_queue_exception);
405
406 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
407 {
408 kvm_multiple_exception(vcpu, nr, false, 0, true);
409 }
410 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
411
412 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
413 {
414 if (err)
415 kvm_inject_gp(vcpu, 0);
416 else
417 kvm_x86_ops->skip_emulated_instruction(vcpu);
418 }
419 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
420
421 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
422 {
423 ++vcpu->stat.pf_guest;
424 vcpu->arch.cr2 = fault->address;
425 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
426 }
427 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
428
429 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
430 {
431 if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
432 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
433 else
434 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
435
436 return fault->nested_page_fault;
437 }
438
439 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
440 {
441 atomic_inc(&vcpu->arch.nmi_queued);
442 kvm_make_request(KVM_REQ_NMI, vcpu);
443 }
444 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
445
446 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
447 {
448 kvm_multiple_exception(vcpu, nr, true, error_code, false);
449 }
450 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
451
452 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
453 {
454 kvm_multiple_exception(vcpu, nr, true, error_code, true);
455 }
456 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
457
458 /*
459  * Check whether cpl <= required_cpl; if so, return true. Otherwise queue
460 * a #GP and return false.
461 */
462 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
463 {
464 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
465 return true;
466 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
467 return false;
468 }
469 EXPORT_SYMBOL_GPL(kvm_require_cpl);
470
471 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
472 {
473 if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
474 return true;
475
476 kvm_queue_exception(vcpu, UD_VECTOR);
477 return false;
478 }
479 EXPORT_SYMBOL_GPL(kvm_require_dr);
480
481 /*
482  * This function is used to read from the physical memory of the currently
483  * running guest. The difference from kvm_vcpu_read_guest_page is that this function
484  * can read from guest physical memory or from the guest's guest physical memory.
485 */
486 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
487 gfn_t ngfn, void *data, int offset, int len,
488 u32 access)
489 {
490 struct x86_exception exception;
491 gfn_t real_gfn;
492 gpa_t ngpa;
493
494 ngpa = gfn_to_gpa(ngfn);
495 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
496 if (real_gfn == UNMAPPED_GVA)
497 return -EFAULT;
498
499 real_gfn = gpa_to_gfn(real_gfn);
500
501 return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
502 }
503 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
504
505 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
506 void *data, int offset, int len, u32 access)
507 {
508 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
509 data, offset, len, access);
510 }
511
512 /*
513  * Load the PAE pdptrs.  Return true if they are all valid.
514 */
515 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
516 {
517 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
518 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
519 int i;
520 int ret;
521 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
522
523 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
524 offset * sizeof(u64), sizeof(pdpte),
525 PFERR_USER_MASK|PFERR_WRITE_MASK);
526 if (ret < 0) {
527 ret = 0;
528 goto out;
529 }
530 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
531 if (is_present_gpte(pdpte[i]) &&
532 (pdpte[i] &
533 vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
534 ret = 0;
535 goto out;
536 }
537 }
538 ret = 1;
539
540 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
541 __set_bit(VCPU_EXREG_PDPTR,
542 (unsigned long *)&vcpu->arch.regs_avail);
543 __set_bit(VCPU_EXREG_PDPTR,
544 (unsigned long *)&vcpu->arch.regs_dirty);
545 out:
546
547 return ret;
548 }
549 EXPORT_SYMBOL_GPL(load_pdptrs);
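
/*
 * Example of the offset arithmetic above, with an illustrative CR3 value:
 * for cr3 = 0x12345020, pdpt_gfn = 0x12345 and
 * offset = ((0x020 >> 5) << 2) = 4, so the four PDPTEs are read starting
 * at byte 4 * sizeof(u64) = 32 of that page -- in PAE mode CR3 bits 31:5
 * select a 32-byte-aligned table.
 */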
550
551 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
552 {
553 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
554 bool changed = true;
555 int offset;
556 gfn_t gfn;
557 int r;
558
559 if (is_long_mode(vcpu) || !is_pae(vcpu))
560 return false;
561
562 if (!test_bit(VCPU_EXREG_PDPTR,
563 (unsigned long *)&vcpu->arch.regs_avail))
564 return true;
565
566 gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
567 offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
568 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
569 PFERR_USER_MASK | PFERR_WRITE_MASK);
570 if (r < 0)
571 goto out;
572 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
573 out:
574
575 return changed;
576 }
577
578 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
579 {
580 unsigned long old_cr0 = kvm_read_cr0(vcpu);
581 unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
582
583 cr0 |= X86_CR0_ET;
584
585 #ifdef CONFIG_X86_64
586 if (cr0 & 0xffffffff00000000UL)
587 return 1;
588 #endif
589
590 cr0 &= ~CR0_RESERVED_BITS;
591
592 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
593 return 1;
594
595 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
596 return 1;
597
598 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
599 #ifdef CONFIG_X86_64
600 if ((vcpu->arch.efer & EFER_LME)) {
601 int cs_db, cs_l;
602
603 if (!is_pae(vcpu))
604 return 1;
605 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
606 if (cs_l)
607 return 1;
608 } else
609 #endif
610 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
611 kvm_read_cr3(vcpu)))
612 return 1;
613 }
614
615 if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
616 return 1;
617
618 kvm_x86_ops->set_cr0(vcpu, cr0);
619
620 if ((cr0 ^ old_cr0) & X86_CR0_PG) {
621 kvm_clear_async_pf_completion_queue(vcpu);
622 kvm_async_pf_hash_reset(vcpu);
623 }
624
625 if ((cr0 ^ old_cr0) & update_bits)
626 kvm_mmu_reset_context(vcpu);
627
628 if ((cr0 ^ old_cr0) & X86_CR0_CD)
629 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
630
631 return 0;
632 }
633 EXPORT_SYMBOL_GPL(kvm_set_cr0);
634
635 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
636 {
637 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
638 }
639 EXPORT_SYMBOL_GPL(kvm_lmsw);
640
641 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
642 {
643 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
644 !vcpu->guest_xcr0_loaded) {
645 /* kvm_set_xcr() also depends on this */
646 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
647 vcpu->guest_xcr0_loaded = 1;
648 }
649 }
650
651 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
652 {
653 if (vcpu->guest_xcr0_loaded) {
654 if (vcpu->arch.xcr0 != host_xcr0)
655 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
656 vcpu->guest_xcr0_loaded = 0;
657 }
658 }
659
660 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
661 {
662 u64 xcr0 = xcr;
663 u64 old_xcr0 = vcpu->arch.xcr0;
664 u64 valid_bits;
665
666 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
667 if (index != XCR_XFEATURE_ENABLED_MASK)
668 return 1;
669 if (!(xcr0 & XSTATE_FP))
670 return 1;
671 if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
672 return 1;
673
674 /*
675 * Do not allow the guest to set bits that we do not support
676 * saving. However, xcr0 bit 0 is always set, even if the
677 * emulated CPU does not support XSAVE (see fx_init).
678 */
679 valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
680 if (xcr0 & ~valid_bits)
681 return 1;
682
683 if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
684 return 1;
685
686 if (xcr0 & XSTATE_AVX512) {
687 if (!(xcr0 & XSTATE_YMM))
688 return 1;
689 if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
690 return 1;
691 }
692 kvm_put_guest_xcr0(vcpu);
693 vcpu->arch.xcr0 = xcr0;
694
695 if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK)
696 kvm_update_cpuid(vcpu);
697 return 0;
698 }
699
700 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
701 {
702 if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
703 __kvm_set_xcr(vcpu, index, xcr)) {
704 kvm_inject_gp(vcpu, 0);
705 return 1;
706 }
707 return 0;
708 }
709 EXPORT_SYMBOL_GPL(kvm_set_xcr);
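
/*
 * Example of the dependency checks above: a guest enabling AVX must write
 * xcr0 = XSTATE_FP | XSTATE_SSE | XSTATE_YMM (0x7); YMM without SSE,
 * AVX-512 state without YMM, or a partial AVX-512 set all fail
 * __kvm_set_xcr() and the guest gets a #GP.
 */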
710
711 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
712 {
713 unsigned long old_cr4 = kvm_read_cr4(vcpu);
714 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
715 X86_CR4_SMEP | X86_CR4_SMAP;
716
717 if (cr4 & CR4_RESERVED_BITS)
718 return 1;
719
720 if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
721 return 1;
722
723 if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
724 return 1;
725
726 if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
727 return 1;
728
729 if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
730 return 1;
731
732 if (is_long_mode(vcpu)) {
733 if (!(cr4 & X86_CR4_PAE))
734 return 1;
735 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
736 && ((cr4 ^ old_cr4) & pdptr_bits)
737 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
738 kvm_read_cr3(vcpu)))
739 return 1;
740
741 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
742 if (!guest_cpuid_has_pcid(vcpu))
743 return 1;
744
745 		/* PCID cannot be enabled when cr3[11:0] != 000H or EFER.LMA = 0 */
746 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
747 return 1;
748 }
749
750 if (kvm_x86_ops->set_cr4(vcpu, cr4))
751 return 1;
752
753 if (((cr4 ^ old_cr4) & pdptr_bits) ||
754 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
755 kvm_mmu_reset_context(vcpu);
756
757 if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
758 kvm_update_cpuid(vcpu);
759
760 return 0;
761 }
762 EXPORT_SYMBOL_GPL(kvm_set_cr4);
763
764 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
765 {
766 #ifdef CONFIG_X86_64
767 cr3 &= ~CR3_PCID_INVD;
768 #endif
769
770 if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
771 kvm_mmu_sync_roots(vcpu);
772 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
773 return 0;
774 }
775
776 if (is_long_mode(vcpu)) {
777 if (cr3 & CR3_L_MODE_RESERVED_BITS)
778 return 1;
779 } else if (is_pae(vcpu) && is_paging(vcpu) &&
780 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
781 return 1;
782
783 vcpu->arch.cr3 = cr3;
784 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
785 kvm_mmu_new_cr3(vcpu);
786 return 0;
787 }
788 EXPORT_SYMBOL_GPL(kvm_set_cr3);
789
790 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
791 {
792 if (cr8 & CR8_RESERVED_BITS)
793 return 1;
794 if (lapic_in_kernel(vcpu))
795 kvm_lapic_set_tpr(vcpu, cr8);
796 else
797 vcpu->arch.cr8 = cr8;
798 return 0;
799 }
800 EXPORT_SYMBOL_GPL(kvm_set_cr8);
801
802 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
803 {
804 if (lapic_in_kernel(vcpu))
805 return kvm_lapic_get_cr8(vcpu);
806 else
807 return vcpu->arch.cr8;
808 }
809 EXPORT_SYMBOL_GPL(kvm_get_cr8);
810
811 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
812 {
813 int i;
814
815 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
816 for (i = 0; i < KVM_NR_DB_REGS; i++)
817 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
818 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
819 }
820 }
821
822 static void kvm_update_dr6(struct kvm_vcpu *vcpu)
823 {
824 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
825 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
826 }
827
828 static void kvm_update_dr7(struct kvm_vcpu *vcpu)
829 {
830 unsigned long dr7;
831
832 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
833 dr7 = vcpu->arch.guest_debug_dr7;
834 else
835 dr7 = vcpu->arch.dr7;
836 kvm_x86_ops->set_dr7(vcpu, dr7);
837 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
838 if (dr7 & DR7_BP_EN_MASK)
839 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
840 }
841
842 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
843 {
844 u64 fixed = DR6_FIXED_1;
845
846 if (!guest_cpuid_has_rtm(vcpu))
847 fixed |= DR6_RTM;
848 return fixed;
849 }
850
851 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
852 {
853 switch (dr) {
854 case 0 ... 3:
855 vcpu->arch.db[dr] = val;
856 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
857 vcpu->arch.eff_db[dr] = val;
858 break;
859 case 4:
860 /* fall through */
861 case 6:
862 if (val & 0xffffffff00000000ULL)
863 return -1; /* #GP */
864 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
865 kvm_update_dr6(vcpu);
866 break;
867 case 5:
868 /* fall through */
869 default: /* 7 */
870 if (val & 0xffffffff00000000ULL)
871 return -1; /* #GP */
872 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
873 kvm_update_dr7(vcpu);
874 break;
875 }
876
877 return 0;
878 }
879
880 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
881 {
882 if (__kvm_set_dr(vcpu, dr, val)) {
883 kvm_inject_gp(vcpu, 0);
884 return 1;
885 }
886 return 0;
887 }
888 EXPORT_SYMBOL_GPL(kvm_set_dr);
889
890 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
891 {
892 switch (dr) {
893 case 0 ... 3:
894 *val = vcpu->arch.db[dr];
895 break;
896 case 4:
897 /* fall through */
898 case 6:
899 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
900 *val = vcpu->arch.dr6;
901 else
902 *val = kvm_x86_ops->get_dr6(vcpu);
903 break;
904 case 5:
905 /* fall through */
906 default: /* 7 */
907 *val = vcpu->arch.dr7;
908 break;
909 }
910 return 0;
911 }
912 EXPORT_SYMBOL_GPL(kvm_get_dr);
913
914 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
915 {
916 u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
917 u64 data;
918 int err;
919
920 err = kvm_pmu_rdpmc(vcpu, ecx, &data);
921 if (err)
922 return err;
923 kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
924 kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
925 return err;
926 }
927 EXPORT_SYMBOL_GPL(kvm_rdpmc);
928
929 /*
930 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
931 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
932 *
933 * This list is modified at module load time to reflect the
934 * capabilities of the host cpu. This capabilities test skips MSRs that are
935 * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
936 * may depend on host virtualization features rather than host cpu features.
937 */
938
939 static u32 msrs_to_save[] = {
940 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
941 MSR_STAR,
942 #ifdef CONFIG_X86_64
943 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
944 #endif
945 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
946 MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
947 };
948
949 static unsigned num_msrs_to_save;
950
951 static u32 emulated_msrs[] = {
952 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
953 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
954 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
955 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
956 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
957 HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
958 HV_X64_MSR_RESET,
959 HV_X64_MSR_VP_INDEX,
960 HV_X64_MSR_VP_RUNTIME,
961 HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
962 MSR_KVM_PV_EOI_EN,
963
964 MSR_IA32_TSC_ADJUST,
965 MSR_IA32_TSCDEADLINE,
966 MSR_IA32_MISC_ENABLE,
967 MSR_IA32_MCG_STATUS,
968 MSR_IA32_MCG_CTL,
969 MSR_IA32_SMBASE,
970 };
971
972 static unsigned num_emulated_msrs;
973
974 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
975 {
976 if (efer & efer_reserved_bits)
977 return false;
978
979 if (efer & EFER_FFXSR) {
980 struct kvm_cpuid_entry2 *feat;
981
982 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
983 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
984 return false;
985 }
986
987 if (efer & EFER_SVME) {
988 struct kvm_cpuid_entry2 *feat;
989
990 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
991 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
992 return false;
993 }
994
995 return true;
996 }
997 EXPORT_SYMBOL_GPL(kvm_valid_efer);
998
999 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
1000 {
1001 u64 old_efer = vcpu->arch.efer;
1002
1003 if (!kvm_valid_efer(vcpu, efer))
1004 return 1;
1005
1006 if (is_paging(vcpu)
1007 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1008 return 1;
1009
1010 efer &= ~EFER_LMA;
1011 efer |= vcpu->arch.efer & EFER_LMA;
1012
1013 kvm_x86_ops->set_efer(vcpu, efer);
1014
1015 /* Update reserved bits */
1016 if ((efer ^ old_efer) & EFER_NX)
1017 kvm_mmu_reset_context(vcpu);
1018
1019 return 0;
1020 }
1021
1022 void kvm_enable_efer_bits(u64 mask)
1023 {
1024 efer_reserved_bits &= ~mask;
1025 }
1026 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1027
1028 /*
1029  * Writes msr value into the appropriate "register".
1030 * Returns 0 on success, non-0 otherwise.
1031 * Assumes vcpu_load() was already called.
1032 */
1033 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1034 {
1035 switch (msr->index) {
1036 case MSR_FS_BASE:
1037 case MSR_GS_BASE:
1038 case MSR_KERNEL_GS_BASE:
1039 case MSR_CSTAR:
1040 case MSR_LSTAR:
1041 if (is_noncanonical_address(msr->data))
1042 return 1;
1043 break;
1044 case MSR_IA32_SYSENTER_EIP:
1045 case MSR_IA32_SYSENTER_ESP:
1046 /*
1047 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1048 * non-canonical address is written on Intel but not on
1049 * AMD (which ignores the top 32-bits, because it does
1050 * not implement 64-bit SYSENTER).
1051 *
1052 * 64-bit code should hence be able to write a non-canonical
1053 * value on AMD. Making the address canonical ensures that
1054 * vmentry does not fail on Intel after writing a non-canonical
1055 * value, and that something deterministic happens if the guest
1056 * invokes 64-bit SYSENTER.
1057 */
1058 msr->data = get_canonical(msr->data);
1059 }
1060 return kvm_x86_ops->set_msr(vcpu, msr);
1061 }
1062 EXPORT_SYMBOL_GPL(kvm_set_msr);
1063
1064 /*
1065  * Adapt set_msr() and get_msr() to msr_io()'s calling convention
1066 */
1067 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1068 {
1069 struct msr_data msr;
1070 int r;
1071
1072 msr.index = index;
1073 msr.host_initiated = true;
1074 r = kvm_get_msr(vcpu, &msr);
1075 if (r)
1076 return r;
1077
1078 *data = msr.data;
1079 return 0;
1080 }
1081
1082 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1083 {
1084 struct msr_data msr;
1085
1086 msr.data = *data;
1087 msr.index = index;
1088 msr.host_initiated = true;
1089 return kvm_set_msr(vcpu, &msr);
1090 }
1091
1092 #ifdef CONFIG_X86_64
1093 struct pvclock_gtod_data {
1094 seqcount_t seq;
1095
1096 struct { /* extract of a clocksource struct */
1097 int vclock_mode;
1098 cycle_t cycle_last;
1099 cycle_t mask;
1100 u32 mult;
1101 u32 shift;
1102 } clock;
1103
1104 u64 boot_ns;
1105 u64 nsec_base;
1106 };
1107
1108 static struct pvclock_gtod_data pvclock_gtod_data;
1109
1110 static void update_pvclock_gtod(struct timekeeper *tk)
1111 {
1112 struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
1113 u64 boot_ns;
1114
1115 boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
1116
1117 write_seqcount_begin(&vdata->seq);
1118
1119 /* copy pvclock gtod data */
1120 vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
1121 vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
1122 vdata->clock.mask = tk->tkr_mono.mask;
1123 vdata->clock.mult = tk->tkr_mono.mult;
1124 vdata->clock.shift = tk->tkr_mono.shift;
1125
1126 vdata->boot_ns = boot_ns;
1127 vdata->nsec_base = tk->tkr_mono.xtime_nsec;
1128
1129 write_seqcount_end(&vdata->seq);
1130 }
1131 #endif
1132
1133 void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
1134 {
1135 /*
1136 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
1137 * vcpu_enter_guest. This function is only called from
1138 * the physical CPU that is running vcpu.
1139 */
1140 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1141 }
1142
1143 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1144 {
1145 int version;
1146 int r;
1147 struct pvclock_wall_clock wc;
1148 struct timespec boot;
1149
1150 if (!wall_clock)
1151 return;
1152
1153 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1154 if (r)
1155 return;
1156
1157 if (version & 1)
1158 ++version; /* first time write, random junk */
1159
1160 ++version;
1161
1162 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1163
1164 /*
1165 * The guest calculates current wall clock time by adding
1166 * system time (updated by kvm_guest_time_update below) to the
1167 * wall clock specified here. guest system time equals host
1168 * system time for us, thus we must fill in host boot time here.
1169 */
1170 getboottime(&boot);
1171
1172 if (kvm->arch.kvmclock_offset) {
1173 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
1174 boot = timespec_sub(boot, ts);
1175 }
1176 wc.sec = boot.tv_sec;
1177 wc.nsec = boot.tv_nsec;
1178 wc.version = version;
1179
1180 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
1181
1182 version++;
1183 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1184 }
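
/*
 * Example of the wall-clock scheme described above, with illustrative
 * numbers: the guest computes wall time = wc (host boot time, written
 * here) + kvmclock system_time (nanoseconds since boot, from
 * kvm_guest_time_update). If the host booted at 10:00:00 and 90 seconds
 * of system time have elapsed, the guest reads 10:01:30.
 */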
1185
1186 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
1187 {
1188 uint32_t quotient, remainder;
1189
1190 	/* Don't try to replace this with do_div(); this one calculates
1191 * "(dividend << 32) / divisor" */
1192 __asm__ ( "divl %4"
1193 : "=a" (quotient), "=d" (remainder)
1194 : "0" (0), "1" (dividend), "r" (divisor) );
1195 return quotient;
1196 }
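
/*
 * A mathematically equivalent portable form of the asm above (a sketch;
 * the asm avoids a 64-by-32 libgcc division on 32-bit hosts):
 *
 *	return (uint32_t)(((uint64_t)dividend << 32) / divisor);
 *
 * The caller (kvm_get_time_scale) guarantees dividend < divisor, so the
 * 32-bit quotient cannot overflow and divl cannot fault.
 */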
1197
1198 static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
1199 s8 *pshift, u32 *pmultiplier)
1200 {
1201 uint64_t scaled64;
1202 int32_t shift = 0;
1203 uint64_t tps64;
1204 uint32_t tps32;
1205
1206 tps64 = base_khz * 1000LL;
1207 scaled64 = scaled_khz * 1000LL;
1208 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
1209 tps64 >>= 1;
1210 shift--;
1211 }
1212
1213 tps32 = (uint32_t)tps64;
1214 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
1215 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
1216 scaled64 >>= 1;
1217 else
1218 tps32 <<= 1;
1219 shift++;
1220 }
1221
1222 *pshift = shift;
1223 *pmultiplier = div_frac(scaled64, tps32);
1224
1225 pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
1226 __func__, base_khz, scaled_khz, shift, *pmultiplier);
1227 }
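
/*
 * Worked example (illustrative rates): kvm_get_time_scale(1000000,
 * 2000000, &shift, &mult) -- converting a 2,000,000 kHz (2 GHz) TSC into
 * nanoseconds, which "tick" at 1,000,000 kHz -- yields shift = 0 and
 * mult = 0x80000000, so delta_ns = (delta_tsc * 2^31) >> 32 =
 * delta_tsc / 2: one nanosecond per two cycles.
 */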
1228
1229 #ifdef CONFIG_X86_64
1230 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
1231 #endif
1232
1233 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1234 static unsigned long max_tsc_khz;
1235
1236 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
1237 {
1238 return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
1239 vcpu->arch.virtual_tsc_shift);
1240 }
1241
1242 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1243 {
1244 u64 v = (u64)khz * (1000000 + ppm);
1245 do_div(v, 1000000);
1246 return v;
1247 }
1248
1249 static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
1250 {
1251 u32 thresh_lo, thresh_hi;
1252 int use_scaling = 0;
1253
1254 /* tsc_khz can be zero if TSC calibration fails */
1255 if (this_tsc_khz == 0)
1256 return;
1257
1258 /* Compute a scale to convert nanoseconds in TSC cycles */
1259 kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
1260 &vcpu->arch.virtual_tsc_shift,
1261 &vcpu->arch.virtual_tsc_mult);
1262 vcpu->arch.virtual_tsc_khz = this_tsc_khz;
1263
1264 /*
1265 	 * Compute the acceptable variation in TSC rate
1266 	 * within the tolerance range, and decide whether the
1267 	 * rate being applied is within those bounds of the hardware
1268 	 * rate. If so, no scaling or compensation needs to be done.
1269 */
1270 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1271 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1272 if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
1273 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
1274 use_scaling = 1;
1275 }
1276 kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
1277 }
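
/*
 * Example with the default tsc_tolerance_ppm of 250: on a 3,000,000 kHz
 * host, thresh_lo/thresh_hi come out to 2,999,250 and 3,000,750 kHz, so
 * any requested guest rate inside that window runs unscaled, and anything
 * outside sets use_scaling.
 */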
1278
1279 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1280 {
1281 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1282 vcpu->arch.virtual_tsc_mult,
1283 vcpu->arch.virtual_tsc_shift);
1284 tsc += vcpu->arch.this_tsc_write;
1285 return tsc;
1286 }
1287
1288 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
1289 {
1290 #ifdef CONFIG_X86_64
1291 bool vcpus_matched;
1292 struct kvm_arch *ka = &vcpu->kvm->arch;
1293 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1294
1295 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1296 atomic_read(&vcpu->kvm->online_vcpus));
1297
1298 /*
1299 	 * Once the masterclock is enabled, always perform the request in
1300 	 * order to update it.
1301 *
1302 * In order to enable masterclock, the host clocksource must be TSC
1303 * and the vcpus need to have matched TSCs. When that happens,
1304 * perform request to enable masterclock.
1305 */
1306 if (ka->use_master_clock ||
1307 (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
1308 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1309
1310 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
1311 atomic_read(&vcpu->kvm->online_vcpus),
1312 ka->use_master_clock, gtod->clock.vclock_mode);
1313 #endif
1314 }
1315
1316 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
1317 {
1318 u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
1319 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
1320 }
1321
1322 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
1323 {
1324 struct kvm *kvm = vcpu->kvm;
1325 u64 offset, ns, elapsed;
1326 unsigned long flags;
1327 s64 usdiff;
1328 bool matched;
1329 bool already_matched;
1330 u64 data = msr->data;
1331
1332 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1333 offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1334 ns = get_kernel_ns();
1335 elapsed = ns - kvm->arch.last_tsc_nsec;
1336
1337 if (vcpu->arch.virtual_tsc_khz) {
1338 int faulted = 0;
1339
1340 /* n.b - signed multiplication and division required */
1341 usdiff = data - kvm->arch.last_tsc_write;
1342 #ifdef CONFIG_X86_64
1343 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
1344 #else
1345 /* do_div() only does unsigned */
1346 asm("1: idivl %[divisor]\n"
1347 "2: xor %%edx, %%edx\n"
1348 " movl $0, %[faulted]\n"
1349 "3:\n"
1350 ".section .fixup,\"ax\"\n"
1351 "4: movl $1, %[faulted]\n"
1352 " jmp 3b\n"
1353 ".previous\n"
1354
1355 _ASM_EXTABLE(1b, 4b)
1356
1357 : "=A"(usdiff), [faulted] "=r" (faulted)
1358 : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
1359
1360 #endif
1361 do_div(elapsed, 1000);
1362 usdiff -= elapsed;
1363 if (usdiff < 0)
1364 usdiff = -usdiff;
1365
1366 /* idivl overflow => difference is larger than USEC_PER_SEC */
1367 if (faulted)
1368 usdiff = USEC_PER_SEC;
1369 } else
1370 usdiff = USEC_PER_SEC; /* disable TSC match window below */
1371
1372 /*
1373 * Special case: TSC write with a small delta (1 second) of virtual
1374 * cycle time against real time is interpreted as an attempt to
1375 * synchronize the CPU.
1376 *
1377 * For a reliable TSC, we can match TSC offsets, and for an unstable
1378 * TSC, we add elapsed time in this computation. We could let the
1379 * compensation code attempt to catch up if we fall behind, but
1380 * it's better to try to match offsets from the beginning.
1381 */
1382 if (usdiff < USEC_PER_SEC &&
1383 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1384 if (!check_tsc_unstable()) {
1385 offset = kvm->arch.cur_tsc_offset;
1386 pr_debug("kvm: matched tsc offset for %llu\n", data);
1387 } else {
1388 u64 delta = nsec_to_cycles(vcpu, elapsed);
1389 data += delta;
1390 offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1391 pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1392 }
1393 matched = true;
1394 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
1395 } else {
1396 /*
1397 * We split periods of matched TSC writes into generations.
1398 * For each generation, we track the original measured
1399 * nanosecond time, offset, and write, so if TSCs are in
1400 * sync, we can match exact offset, and if not, we can match
1401 * exact software computation in compute_guest_tsc()
1402 *
1403 * These values are tracked in kvm->arch.cur_xxx variables.
1404 */
1405 kvm->arch.cur_tsc_generation++;
1406 kvm->arch.cur_tsc_nsec = ns;
1407 kvm->arch.cur_tsc_write = data;
1408 kvm->arch.cur_tsc_offset = offset;
1409 matched = false;
1410 pr_debug("kvm: new tsc generation %llu, clock %llu\n",
1411 kvm->arch.cur_tsc_generation, data);
1412 }
1413
1414 /*
1415 	 * We also track the most recently recorded kHz, write and time to
1416 * allow the matching interval to be extended at each write.
1417 */
1418 kvm->arch.last_tsc_nsec = ns;
1419 kvm->arch.last_tsc_write = data;
1420 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1421
1422 vcpu->arch.last_guest_tsc = data;
1423
1424 /* Keep track of which generation this VCPU has synchronized to */
1425 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
1426 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
1427 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
1428
1429 if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
1430 update_ia32_tsc_adjust_msr(vcpu, offset);
1431 kvm_x86_ops->write_tsc_offset(vcpu, offset);
1432 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1433
1434 spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
1435 if (!matched) {
1436 kvm->arch.nr_vcpus_matched_tsc = 0;
1437 } else if (!already_matched) {
1438 kvm->arch.nr_vcpus_matched_tsc++;
1439 }
1440
1441 kvm_track_tsc_matching(vcpu);
1442 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
1443 }
1444
1445 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1446
1447 #ifdef CONFIG_X86_64
1448
1449 static cycle_t read_tsc(void)
1450 {
1451 cycle_t ret = (cycle_t)rdtsc_ordered();
1452 u64 last = pvclock_gtod_data.clock.cycle_last;
1453
1454 if (likely(ret >= last))
1455 return ret;
1456
1457 /*
1458 * GCC likes to generate cmov here, but this branch is extremely
1459 	 * predictable (it's just a function of time and the likely is
1460 * very likely) and there's a data dependence, so force GCC
1461 * to generate a branch instead. I don't barrier() because
1462 * we don't actually need a barrier, and if this function
1463 * ever gets inlined it will generate worse code.
1464 */
1465 asm volatile ("");
1466 return last;
1467 }
1468
1469 static inline u64 vgettsc(cycle_t *cycle_now)
1470 {
1471 long v;
1472 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1473
1474 *cycle_now = read_tsc();
1475
1476 v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
1477 return v * gtod->clock.mult;
1478 }
1479
1480 static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
1481 {
1482 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1483 unsigned long seq;
1484 int mode;
1485 u64 ns;
1486
1487 do {
1488 seq = read_seqcount_begin(&gtod->seq);
1489 mode = gtod->clock.vclock_mode;
1490 ns = gtod->nsec_base;
1491 ns += vgettsc(cycle_now);
1492 ns >>= gtod->clock.shift;
1493 ns += gtod->boot_ns;
1494 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1495 *t = ns;
1496
1497 return mode;
1498 }
1499
1500 /* returns true if host is using tsc clocksource */
1501 static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
1502 {
1503 /* checked again under seqlock below */
1504 if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
1505 return false;
1506
1507 return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
1508 }
1509 #endif
1510
1511 /*
1512 *
1513  * Assuming a stable TSC across physical CPUs, and a stable TSC
1514 * across virtual CPUs, the following condition is possible.
1515 * Each numbered line represents an event visible to both
1516 * CPUs at the next numbered event.
1517 *
1518 * "timespecX" represents host monotonic time. "tscX" represents
1519 * RDTSC value.
1520 *
1521 * VCPU0 on CPU0 | VCPU1 on CPU1
1522 *
1523 * 1. read timespec0,tsc0
1524 * 2. | timespec1 = timespec0 + N
1525 * | tsc1 = tsc0 + M
1526 * 3. transition to guest | transition to guest
1527 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
1528 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
1529 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
1530 *
1531 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
1532 *
1533 * - ret0 < ret1
1534 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
1535 * ...
1536 * - 0 < N - M => M < N
1537 *
1538 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
1539 * always the case (the difference between two distinct xtime instances
1540  * might be smaller than the difference between corresponding TSC reads,
1541 * when updating guest vcpus pvclock areas).
1542 *
1543 * To avoid that problem, do not allow visibility of distinct
1544 * system_timestamp/tsc_timestamp values simultaneously: use a master
1545 * copy of host monotonic time values. Update that master copy
1546 * in lockstep.
1547 *
1548 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
1549 *
1550 */
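
/*
 * A numeric instance of the problem above (illustrative values, one TSC
 * cycle per nanosecond): if the two samples are taken N = 100ns apart in
 * host time but the TSC samples are M = 1000 cycles apart, then
 * ret1 - ret0 = N - M = -900ns, and a guest comparing kvmclock reads
 * from the two vCPUs sees time jump backwards by 900ns.
 */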
1551
1552 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
1553 {
1554 #ifdef CONFIG_X86_64
1555 struct kvm_arch *ka = &kvm->arch;
1556 int vclock_mode;
1557 bool host_tsc_clocksource, vcpus_matched;
1558
1559 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1560 atomic_read(&kvm->online_vcpus));
1561
1562 /*
1563 * If the host uses TSC clock, then passthrough TSC as stable
1564 * to the guest.
1565 */
1566 host_tsc_clocksource = kvm_get_time_and_clockread(
1567 &ka->master_kernel_ns,
1568 &ka->master_cycle_now);
1569
1570 ka->use_master_clock = host_tsc_clocksource && vcpus_matched
1571 && !backwards_tsc_observed
1572 && !ka->boot_vcpu_runs_old_kvmclock;
1573
1574 if (ka->use_master_clock)
1575 atomic_set(&kvm_guest_has_master_clock, 1);
1576
1577 vclock_mode = pvclock_gtod_data.clock.vclock_mode;
1578 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
1579 vcpus_matched);
1580 #endif
1581 }
1582
1583 static void kvm_gen_update_masterclock(struct kvm *kvm)
1584 {
1585 #ifdef CONFIG_X86_64
1586 int i;
1587 struct kvm_vcpu *vcpu;
1588 struct kvm_arch *ka = &kvm->arch;
1589
1590 spin_lock(&ka->pvclock_gtod_sync_lock);
1591 kvm_make_mclock_inprogress_request(kvm);
1592 /* no guest entries from this point */
1593 pvclock_update_vm_gtod_copy(kvm);
1594
1595 kvm_for_each_vcpu(i, vcpu, kvm)
1596 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1597
1598 /* guest entries allowed */
1599 kvm_for_each_vcpu(i, vcpu, kvm)
1600 clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
1601
1602 spin_unlock(&ka->pvclock_gtod_sync_lock);
1603 #endif
1604 }
1605
1606 static int kvm_guest_time_update(struct kvm_vcpu *v)
1607 {
1608 unsigned long flags, this_tsc_khz;
1609 struct kvm_vcpu_arch *vcpu = &v->arch;
1610 struct kvm_arch *ka = &v->kvm->arch;
1611 s64 kernel_ns;
1612 u64 tsc_timestamp, host_tsc;
1613 struct pvclock_vcpu_time_info guest_hv_clock;
1614 u8 pvclock_flags;
1615 bool use_master_clock;
1616
1617 kernel_ns = 0;
1618 host_tsc = 0;
1619
1620 /*
1621 * If the host uses TSC clock, then passthrough TSC as stable
1622 * to the guest.
1623 */
1624 spin_lock(&ka->pvclock_gtod_sync_lock);
1625 use_master_clock = ka->use_master_clock;
1626 if (use_master_clock) {
1627 host_tsc = ka->master_cycle_now;
1628 kernel_ns = ka->master_kernel_ns;
1629 }
1630 spin_unlock(&ka->pvclock_gtod_sync_lock);
1631
1632 /* Keep irq disabled to prevent changes to the clock */
1633 local_irq_save(flags);
1634 this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
1635 if (unlikely(this_tsc_khz == 0)) {
1636 local_irq_restore(flags);
1637 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1638 return 1;
1639 }
1640 if (!use_master_clock) {
1641 host_tsc = rdtsc();
1642 kernel_ns = get_kernel_ns();
1643 }
1644
1645 tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
1646
1647 /*
1648 * We may have to catch up the TSC to match elapsed wall clock
1649 * time for two reasons, even if kvmclock is used.
1650 * 1) CPU could have been running below the maximum TSC rate
1651 * 2) Broken TSC compensation resets the base at each VCPU
1652 * entry to avoid unknown leaps of TSC even when running
1653 * again on the same CPU. This may cause apparent elapsed
1654 * time to disappear, and the guest to stand still or run
1655 * very slowly.
1656 */
1657 if (vcpu->tsc_catchup) {
1658 u64 tsc = compute_guest_tsc(v, kernel_ns);
1659 if (tsc > tsc_timestamp) {
1660 adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
1661 tsc_timestamp = tsc;
1662 }
1663 }
1664
1665 local_irq_restore(flags);
1666
1667 if (!vcpu->pv_time_enabled)
1668 return 0;
1669
1670 if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
1671 kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
1672 &vcpu->hv_clock.tsc_shift,
1673 &vcpu->hv_clock.tsc_to_system_mul);
1674 vcpu->hw_tsc_khz = this_tsc_khz;
1675 }
1676
1677 /* With all the info we got, fill in the values */
1678 vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1679 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1680 vcpu->last_guest_tsc = tsc_timestamp;
1681
1682 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1683 &guest_hv_clock, sizeof(guest_hv_clock))))
1684 return 0;
1685
1686 /* This VCPU is paused, but it's legal for a guest to read another
1687 * VCPU's kvmclock, so we really have to follow the specification where
1688 * it says that version is odd if data is being modified, and even after
1689 * it is consistent.
1690 *
1691 * Version field updates must be kept separate. This is because
1692 * kvm_write_guest_cached might use a "rep movs" instruction, and
1693 * writes within a string instruction are weakly ordered. So there
1694 * are three writes overall.
1695 *
1696 * As a small optimization, only write the version field in the first
1697 * and third write. The vcpu->pv_time cache is still valid, because the
1698 * version field is the first in the struct.
1699 */
1700 BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
1701
1702 vcpu->hv_clock.version = guest_hv_clock.version + 1;
1703 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1704 &vcpu->hv_clock,
1705 sizeof(vcpu->hv_clock.version));
1706
1707 smp_wmb();
1708
1709 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1710 pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
1711
1712 if (vcpu->pvclock_set_guest_stopped_request) {
1713 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
1714 vcpu->pvclock_set_guest_stopped_request = false;
1715 }
1716
1717 /* If the host uses TSC clocksource, then it is stable */
1718 if (use_master_clock)
1719 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
1720
1721 vcpu->hv_clock.flags = pvclock_flags;
1722
1723 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
1724
1725 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1726 &vcpu->hv_clock,
1727 sizeof(vcpu->hv_clock));
1728
1729 smp_wmb();
1730
1731 vcpu->hv_clock.version++;
1732 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1733 &vcpu->hv_clock,
1734 sizeof(vcpu->hv_clock.version));
1735 return 0;
1736 }
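
/*
 * The three writes above pair with a seqcount-style reader in the guest;
 * roughly (a sketch of the pvclock ABI, not KVM code):
 *
 *	do {
 *		version = src->version;
 *		rmb();
 *		delta = rdtsc() - src->tsc_timestamp;
 *		ns = src->system_time +
 *		     pvclock_scale_delta(delta, src->tsc_to_system_mul,
 *					 src->tsc_shift);
 *		rmb();
 *	} while ((version & 1) || version != src->version);
 *
 * An odd version means an update is in flight, so the guest retries.
 */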
1737
1738 /*
1739 * kvmclock updates which are isolated to a given vcpu, such as
1740 * vcpu->cpu migration, should not allow system_timestamp from
1741 * the rest of the vcpus to remain static. Otherwise ntp frequency
1742 * correction applies to one vcpu's system_timestamp but not
1743 * the others.
1744 *
1745 * So in those cases, request a kvmclock update for all vcpus.
1746 * We need to rate-limit these requests though, as they can
1747 * considerably slow guests that have a large number of vcpus.
1748 * The time for a remote vcpu to update its kvmclock is bound
1749 * by the delay we use to rate-limit the updates.
1750 */
1751
1752 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
1753
1754 static void kvmclock_update_fn(struct work_struct *work)
1755 {
1756 int i;
1757 struct delayed_work *dwork = to_delayed_work(work);
1758 struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1759 kvmclock_update_work);
1760 struct kvm *kvm = container_of(ka, struct kvm, arch);
1761 struct kvm_vcpu *vcpu;
1762
1763 kvm_for_each_vcpu(i, vcpu, kvm) {
1764 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1765 kvm_vcpu_kick(vcpu);
1766 }
1767 }
1768
1769 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
1770 {
1771 struct kvm *kvm = v->kvm;
1772
1773 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1774 schedule_delayed_work(&kvm->arch.kvmclock_update_work,
1775 KVMCLOCK_UPDATE_DELAY);
1776 }
1777
1778 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
1779
1780 static void kvmclock_sync_fn(struct work_struct *work)
1781 {
1782 struct delayed_work *dwork = to_delayed_work(work);
1783 struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1784 kvmclock_sync_work);
1785 struct kvm *kvm = container_of(ka, struct kvm, arch);
1786
1787 if (!kvmclock_periodic_sync)
1788 return;
1789
1790 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
1791 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
1792 KVMCLOCK_SYNC_PERIOD);
1793 }
1794
1795 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1796 {
1797 u64 mcg_cap = vcpu->arch.mcg_cap;
1798 unsigned bank_num = mcg_cap & 0xff;
1799
1800 switch (msr) {
1801 case MSR_IA32_MCG_STATUS:
1802 vcpu->arch.mcg_status = data;
1803 break;
1804 case MSR_IA32_MCG_CTL:
1805 if (!(mcg_cap & MCG_CTL_P))
1806 return 1;
1807 if (data != 0 && data != ~(u64)0)
1808 return -1;
1809 vcpu->arch.mcg_ctl = data;
1810 break;
1811 default:
1812 if (msr >= MSR_IA32_MC0_CTL &&
1813 msr < MSR_IA32_MCx_CTL(bank_num)) {
1814 u32 offset = msr - MSR_IA32_MC0_CTL;
1815 			/* Only 0 or all 1s can be written to IA32_MCi_CTL.
1816 			 * Some Linux kernels, though, clear bit 10 in bank 4 to
1817 			 * work around a BIOS/GART TLB issue on AMD K8s; ignore
1818 			 * this to avoid an uncaught #GP in the guest.
1819 */
1820 if ((offset & 0x3) == 0 &&
1821 data != 0 && (data | (1 << 10)) != ~(u64)0)
1822 return -1;
1823 vcpu->arch.mce_banks[offset] = data;
1824 break;
1825 }
1826 return 1;
1827 }
1828 return 0;
1829 }
1830
1831 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
1832 {
1833 struct kvm *kvm = vcpu->kvm;
1834 int lm = is_long_mode(vcpu);
1835 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
1836 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
1837 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1838 : kvm->arch.xen_hvm_config.blob_size_32;
1839 u32 page_num = data & ~PAGE_MASK;
1840 u64 page_addr = data & PAGE_MASK;
1841 u8 *page;
1842 int r;
1843
1844 r = -E2BIG;
1845 if (page_num >= blob_size)
1846 goto out;
1847 r = -ENOMEM;
1848 page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
1849 if (IS_ERR(page)) {
1850 r = PTR_ERR(page);
1851 goto out;
1852 }
1853 if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
1854 goto out_free;
1855 r = 0;
1856 out_free:
1857 kfree(page);
1858 out:
1859 return r;
1860 }
1861
1862 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1863 {
1864 gpa_t gpa = data & ~0x3f;
1865
1866 	/* Bits 2:5 are reserved; should be zero */
1867 if (data & 0x3c)
1868 return 1;
1869
1870 vcpu->arch.apf.msr_val = data;
1871
1872 if (!(data & KVM_ASYNC_PF_ENABLED)) {
1873 kvm_clear_async_pf_completion_queue(vcpu);
1874 kvm_async_pf_hash_reset(vcpu);
1875 return 0;
1876 }
1877
1878 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
1879 sizeof(u32)))
1880 return 1;
1881
1882 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
1883 kvm_async_pf_wakeup_all(vcpu);
1884 return 0;
1885 }
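
/*
 * Layout of MSR_KVM_ASYNC_PF_EN as handled above: bits 63:6 hold the GPA
 * of a 32-bit token word, bit 0 enables the mechanism, bit 1
 * (KVM_ASYNC_PF_SEND_ALWAYS) requests delivery even while the guest runs
 * in kernel mode, and bits 5:2 are reserved and must be zero.
 */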
1886
1887 static void kvmclock_reset(struct kvm_vcpu *vcpu)
1888 {
1889 vcpu->arch.pv_time_enabled = false;
1890 }
1891
1892 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
1893 {
1894 u64 delta;
1895
1896 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1897 return;
1898
1899 delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
1900 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1901 vcpu->arch.st.accum_steal = delta;
1902 }
1903
1904 static void record_steal_time(struct kvm_vcpu *vcpu)
1905 {
1906 accumulate_steal_time(vcpu);
1907
1908 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1909 return;
1910
1911 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1912 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
1913 return;
1914
1915 vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
1916 vcpu->arch.st.steal.version += 2;
1917 vcpu->arch.st.accum_steal = 0;
1918
1919 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1920 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
1921 }
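
/*
 * The version field above follows a seqlock-like even/odd convention:
 * odd values are reserved to mean "update in progress", so bumping by
 * 2 keeps the published record even from the guest's point of view.
 */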
1922
1923 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1924 {
1925 bool pr = false;
1926 u32 msr = msr_info->index;
1927 u64 data = msr_info->data;
1928
1929 switch (msr) {
1930 case MSR_AMD64_NB_CFG:
1931 case MSR_IA32_UCODE_REV:
1932 case MSR_IA32_UCODE_WRITE:
1933 case MSR_VM_HSAVE_PA:
1934 case MSR_AMD64_PATCH_LOADER:
1935 case MSR_AMD64_BU_CFG2:
1936 break;
1937
1938 case MSR_EFER:
1939 return set_efer(vcpu, data);
1940 case MSR_K7_HWCR:
1941 data &= ~(u64)0x40; /* ignore flush filter disable */
1942 data &= ~(u64)0x100; /* ignore ignne emulation enable */
1943 data &= ~(u64)0x8; /* ignore TLB cache disable */
1944 data &= ~(u64)0x40000; /* ignore Mc status write enable */
1945 if (data != 0) {
1946 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1947 data);
1948 return 1;
1949 }
1950 break;
1951 case MSR_FAM10H_MMIO_CONF_BASE:
1952 if (data != 0) {
1953 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1954 "0x%llx\n", data);
1955 return 1;
1956 }
1957 break;
1958 case MSR_IA32_DEBUGCTLMSR:
1959 if (!data) {
1960 /* We support the non-activated case already */
1961 break;
1962 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1963 /* Values other than LBR and BTF are vendor-specific,
1964 * thus reserved and should throw a #GP */
1965 return 1;
1966 }
1967 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1968 __func__, data);
1969 break;
1970 case 0x200 ... 0x2ff:
1971 return kvm_mtrr_set_msr(vcpu, msr, data);
1972 case MSR_IA32_APICBASE:
1973 return kvm_set_apic_base(vcpu, msr_info);
1974 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1975 return kvm_x2apic_msr_write(vcpu, msr, data);
1976 case MSR_IA32_TSCDEADLINE:
1977 kvm_set_lapic_tscdeadline_msr(vcpu, data);
1978 break;
1979 case MSR_IA32_TSC_ADJUST:
1980 if (guest_cpuid_has_tsc_adjust(vcpu)) {
1981 if (!msr_info->host_initiated) {
1982 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
1983 adjust_tsc_offset_guest(vcpu, adj);
1984 }
1985 vcpu->arch.ia32_tsc_adjust_msr = data;
1986 }
1987 break;
1988 case MSR_IA32_MISC_ENABLE:
1989 vcpu->arch.ia32_misc_enable_msr = data;
1990 break;
1991 case MSR_IA32_SMBASE:
1992 if (!msr_info->host_initiated)
1993 return 1;
1994 vcpu->arch.smbase = data;
1995 break;
1996 case MSR_KVM_WALL_CLOCK_NEW:
1997 case MSR_KVM_WALL_CLOCK:
1998 vcpu->kvm->arch.wall_clock = data;
1999 kvm_write_wall_clock(vcpu->kvm, data);
2000 break;
2001 case MSR_KVM_SYSTEM_TIME_NEW:
2002 case MSR_KVM_SYSTEM_TIME: {
2003 u64 gpa_offset;
2004 struct kvm_arch *ka = &vcpu->kvm->arch;
2005
2006 kvmclock_reset(vcpu);
2007
2008 if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
2009 bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
2010
2011 if (ka->boot_vcpu_runs_old_kvmclock != tmp)
2012 set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
2013 &vcpu->requests);
2014
2015 ka->boot_vcpu_runs_old_kvmclock = tmp;
2016 }
2017
2018 vcpu->arch.time = data;
2019 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2020
2021 /* check whether the enable bit is set... */
2022 if (!(data & 1))
2023 break;
2024
2025 gpa_offset = data & ~(PAGE_MASK | 1);
2026
2027 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2028 &vcpu->arch.pv_time, data & ~1ULL,
2029 sizeof(struct pvclock_vcpu_time_info)))
2030 vcpu->arch.pv_time_enabled = false;
2031 else
2032 vcpu->arch.pv_time_enabled = true;
2033
2034 break;
2035 }
2036 case MSR_KVM_ASYNC_PF_EN:
2037 if (kvm_pv_enable_async_pf(vcpu, data))
2038 return 1;
2039 break;
2040 case MSR_KVM_STEAL_TIME:
2041
2042 if (unlikely(!sched_info_on()))
2043 return 1;
2044
2045 if (data & KVM_STEAL_RESERVED_MASK)
2046 return 1;
2047
2048 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
2049 data & KVM_STEAL_VALID_BITS,
2050 sizeof(struct kvm_steal_time)))
2051 return 1;
2052
2053 vcpu->arch.st.msr_val = data;
2054
2055 if (!(data & KVM_MSR_ENABLED))
2056 break;
2057
2058 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2059
2060 break;
2061 case MSR_KVM_PV_EOI_EN:
2062 if (kvm_lapic_enable_pv_eoi(vcpu, data))
2063 return 1;
2064 break;
2065
2066 case MSR_IA32_MCG_CTL:
2067 case MSR_IA32_MCG_STATUS:
2068 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2069 return set_msr_mce(vcpu, msr, data);
2070
2071 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2072 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2073 pr = true; /* fall through */
2074 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2075 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2076 if (kvm_pmu_is_valid_msr(vcpu, msr))
2077 return kvm_pmu_set_msr(vcpu, msr_info);
2078
2079 if (pr || data != 0)
2080 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
2081 "0x%x data 0x%llx\n", msr, data);
2082 break;
2083 case MSR_K7_CLK_CTL:
2084 /*
2085 * Ignore all writes to this no longer documented MSR.
2086 * Writes are only relevant for old K7 processors,
2087 * all pre-dating SVM, but a recommended workaround from
2088 * AMD for these chips. It is possible to specify the
2089 * affected processor models on the command line, hence
2090 * the need to ignore the workaround.
2091 */
2092 break;
2093 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2094 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2095 case HV_X64_MSR_CRASH_CTL:
2096 return kvm_hv_set_msr_common(vcpu, msr, data,
2097 msr_info->host_initiated);
2098 case MSR_IA32_BBL_CR_CTL3:
2099 /* Drop writes to this legacy MSR -- see rdmsr
2100 * counterpart for further detail.
2101 */
2102 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
2103 break;
2104 case MSR_AMD64_OSVW_ID_LENGTH:
2105 if (!guest_cpuid_has_osvw(vcpu))
2106 return 1;
2107 vcpu->arch.osvw.length = data;
2108 break;
2109 case MSR_AMD64_OSVW_STATUS:
2110 if (!guest_cpuid_has_osvw(vcpu))
2111 return 1;
2112 vcpu->arch.osvw.status = data;
2113 break;
2114 default:
2115 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
2116 return xen_hvm_config(vcpu, data);
2117 if (kvm_pmu_is_valid_msr(vcpu, msr))
2118 return kvm_pmu_set_msr(vcpu, msr_info);
2119 if (!ignore_msrs) {
2120 vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
2121 msr, data);
2122 return 1;
2123 } else {
2124 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
2125 msr, data);
2126 break;
2127 }
2128 }
2129 return 0;
2130 }
2131 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
2132
2133
2134 /*
2135 * Reads an MSR value into the 'data' field of 'msr' (keyed by 'msr->index').
2136 * Returns 0 on success, non-0 otherwise.
2137 * Assumes vcpu_load() was already called.
2138 */
2139 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2140 {
2141 return kvm_x86_ops->get_msr(vcpu, msr);
2142 }
2143 EXPORT_SYMBOL_GPL(kvm_get_msr);
2144
2145 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2146 {
2147 u64 data;
2148 u64 mcg_cap = vcpu->arch.mcg_cap;
2149 unsigned bank_num = mcg_cap & 0xff;
2150
2151 switch (msr) {
2152 case MSR_IA32_P5_MC_ADDR:
2153 case MSR_IA32_P5_MC_TYPE:
2154 data = 0;
2155 break;
2156 case MSR_IA32_MCG_CAP:
2157 data = vcpu->arch.mcg_cap;
2158 break;
2159 case MSR_IA32_MCG_CTL:
2160 if (!(mcg_cap & MCG_CTL_P))
2161 return 1;
2162 data = vcpu->arch.mcg_ctl;
2163 break;
2164 case MSR_IA32_MCG_STATUS:
2165 data = vcpu->arch.mcg_status;
2166 break;
2167 default:
2168 if (msr >= MSR_IA32_MC0_CTL &&
2169 msr < MSR_IA32_MCx_CTL(bank_num)) {
2170 u32 offset = msr - MSR_IA32_MC0_CTL;
2171 data = vcpu->arch.mce_banks[offset];
2172 break;
2173 }
2174 return 1;
2175 }
2176 *pdata = data;
2177 return 0;
2178 }
2179
2180 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2181 {
2182 switch (msr_info->index) {
2183 case MSR_IA32_PLATFORM_ID:
2184 case MSR_IA32_EBL_CR_POWERON:
2185 case MSR_IA32_DEBUGCTLMSR:
2186 case MSR_IA32_LASTBRANCHFROMIP:
2187 case MSR_IA32_LASTBRANCHTOIP:
2188 case MSR_IA32_LASTINTFROMIP:
2189 case MSR_IA32_LASTINTTOIP:
2190 case MSR_K8_SYSCFG:
2191 case MSR_K8_TSEG_ADDR:
2192 case MSR_K8_TSEG_MASK:
2193 case MSR_K7_HWCR:
2194 case MSR_VM_HSAVE_PA:
2195 case MSR_K8_INT_PENDING_MSG:
2196 case MSR_AMD64_NB_CFG:
2197 case MSR_FAM10H_MMIO_CONF_BASE:
2198 case MSR_AMD64_BU_CFG2:
2199 msr_info->data = 0;
2200 break;
2201 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2202 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2203 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2204 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2205 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2206 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2207 msr_info->data = 0;
2208 break;
2209 case MSR_IA32_UCODE_REV:
2210 msr_info->data = 0x100000000ULL;
2211 break;
2212 case MSR_MTRRcap:
2213 case 0x200 ... 0x2ff:
2214 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
2215 case 0xcd: /* fsb frequency */
2216 msr_info->data = 3;
2217 break;
2218 /*
2219 * MSR_EBC_FREQUENCY_ID
2220 * Conservative value valid even for the most basic CPU models.
2221 * Models 0 and 1: 000 in bits 23:21 indicating a bus speed of
2222 * 100MHz; model 2: 000 in bits 18:16 indicating 100MHz;
2223 * models 3 and 4: 266MHz. Set the Core Clock
2224 * Frequency to System Bus Frequency Ratio to 1 (bits
2225 * 31:24) even though these bits are only valid for CPU
2226 * models > 2; guests may otherwise end up dividing or
2227 * multiplying by zero.
2228 */
2229 case MSR_EBC_FREQUENCY_ID:
2230 msr_info->data = 1 << 24;
2231 break;
2232 case MSR_IA32_APICBASE:
2233 msr_info->data = kvm_get_apic_base(vcpu);
2234 break;
2235 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2236 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
2238 case MSR_IA32_TSCDEADLINE:
2239 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
2240 break;
2241 case MSR_IA32_TSC_ADJUST:
2242 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
2243 break;
2244 case MSR_IA32_MISC_ENABLE:
2245 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
2246 break;
2247 case MSR_IA32_SMBASE:
2248 if (!msr_info->host_initiated)
2249 return 1;
2250 msr_info->data = vcpu->arch.smbase;
2251 break;
2252 case MSR_IA32_PERF_STATUS:
2253 /* TSC increment by tick */
2254 msr_info->data = 1000ULL;
2255 /* CPU multiplier */
2256 msr_info->data |= (((uint64_t)4ULL) << 40);
2257 break;
2258 case MSR_EFER:
2259 msr_info->data = vcpu->arch.efer;
2260 break;
2261 case MSR_KVM_WALL_CLOCK:
2262 case MSR_KVM_WALL_CLOCK_NEW:
2263 msr_info->data = vcpu->kvm->arch.wall_clock;
2264 break;
2265 case MSR_KVM_SYSTEM_TIME:
2266 case MSR_KVM_SYSTEM_TIME_NEW:
2267 msr_info->data = vcpu->arch.time;
2268 break;
2269 case MSR_KVM_ASYNC_PF_EN:
2270 msr_info->data = vcpu->arch.apf.msr_val;
2271 break;
2272 case MSR_KVM_STEAL_TIME:
2273 msr_info->data = vcpu->arch.st.msr_val;
2274 break;
2275 case MSR_KVM_PV_EOI_EN:
2276 msr_info->data = vcpu->arch.pv_eoi.msr_val;
2277 break;
2278 case MSR_IA32_P5_MC_ADDR:
2279 case MSR_IA32_P5_MC_TYPE:
2280 case MSR_IA32_MCG_CAP:
2281 case MSR_IA32_MCG_CTL:
2282 case MSR_IA32_MCG_STATUS:
2283 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2284 return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
2285 case MSR_K7_CLK_CTL:
2286 /*
2287 * Provide the expected ramp-up count for K7. All other
2288 * fields are set to zero, indicating minimum divisors for
2289 * every field.
2290 *
2291 * This prevents guest kernels on AMD host with CPU
2292 * type 6, model 8 and higher from exploding due to
2293 * the rdmsr failing.
2294 */
2295 msr_info->data = 0x20000000;
2296 break;
2297 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2298 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2299 case HV_X64_MSR_CRASH_CTL:
2300 return kvm_hv_get_msr_common(vcpu,
2301 msr_info->index, &msr_info->data);
2303 case MSR_IA32_BBL_CR_CTL3:
2304 /* This legacy MSR exists but isn't fully documented in current
2305 * silicon. It is however accessed by winxp in very narrow
2306 * scenarios where it sets bit #19, itself documented as
2307 * a "reserved" bit. Best effort attempt to source coherent
2308 * read data here should the balance of the register be
2309 * interpreted by the guest:
2310 *
2311 * L2 cache control register 3: 64GB range, 256KB size,
2312 * enabled, latency 0x1, configured
2313 */
2314 msr_info->data = 0xbe702111;
2315 break;
2316 case MSR_AMD64_OSVW_ID_LENGTH:
2317 if (!guest_cpuid_has_osvw(vcpu))
2318 return 1;
2319 msr_info->data = vcpu->arch.osvw.length;
2320 break;
2321 case MSR_AMD64_OSVW_STATUS:
2322 if (!guest_cpuid_has_osvw(vcpu))
2323 return 1;
2324 msr_info->data = vcpu->arch.osvw.status;
2325 break;
2326 default:
2327 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2328 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2329 if (!ignore_msrs) {
2330 vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index);
2331 return 1;
2332 } else {
2333 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
2334 msr_info->data = 0;
2335 }
2336 break;
2337 }
2338 return 0;
2339 }
2340 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2341
2342 /*
2343 * Read or write a bunch of msrs. All parameters are kernel addresses.
2344 *
2345 * @return number of msrs processed successfully.
2346 */
2347 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2348 struct kvm_msr_entry *entries,
2349 int (*do_msr)(struct kvm_vcpu *vcpu,
2350 unsigned index, u64 *data))
2351 {
2352 int i, idx;
2353
2354 idx = srcu_read_lock(&vcpu->kvm->srcu);
2355 for (i = 0; i < msrs->nmsrs; ++i)
2356 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2357 break;
2358 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2359
2360 return i;
2361 }
2362
2363 /*
2364 * Read or write a bunch of msrs. Parameters are user addresses.
2365 *
2366 * @return number of msrs processed successfully.
2367 */
2368 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2369 int (*do_msr)(struct kvm_vcpu *vcpu,
2370 unsigned index, u64 *data),
2371 int writeback)
2372 {
2373 struct kvm_msrs msrs;
2374 struct kvm_msr_entry *entries;
2375 int r, n;
2376 unsigned size;
2377
2378 r = -EFAULT;
2379 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2380 goto out;
2381
2382 r = -E2BIG;
2383 if (msrs.nmsrs >= MAX_IO_MSRS)
2384 goto out;
2385
2386 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2387 entries = memdup_user(user_msrs->entries, size);
2388 if (IS_ERR(entries)) {
2389 r = PTR_ERR(entries);
2390 goto out;
2391 }
2392
2393 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2394 if (r < 0)
2395 goto out_free;
2396
2397 r = -EFAULT;
2398 if (writeback && copy_to_user(user_msrs->entries, entries, size))
2399 goto out_free;
2400
2401 r = n;
2402
2403 out_free:
2404 kfree(entries);
2405 out:
2406 return r;
2407 }
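
/*
 * An illustrative userspace sketch of the KVM_GET_MSRS path that lands
 * in msr_io() above; everything except the UAPI structs and the ioctl
 * number is made up for the example:
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} buf = { .hdr.nmsrs = 1, .entries[0].index = MSR_EFER };
 *
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &buf);
 *	// n = number of MSRs actually read; buf.entries[0].data holds EFER
 */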
2408
2409 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2410 {
2411 int r;
2412
2413 switch (ext) {
2414 case KVM_CAP_IRQCHIP:
2415 case KVM_CAP_HLT:
2416 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2417 case KVM_CAP_SET_TSS_ADDR:
2418 case KVM_CAP_EXT_CPUID:
2419 case KVM_CAP_EXT_EMUL_CPUID:
2420 case KVM_CAP_CLOCKSOURCE:
2421 case KVM_CAP_PIT:
2422 case KVM_CAP_NOP_IO_DELAY:
2423 case KVM_CAP_MP_STATE:
2424 case KVM_CAP_SYNC_MMU:
2425 case KVM_CAP_USER_NMI:
2426 case KVM_CAP_REINJECT_CONTROL:
2427 case KVM_CAP_IRQ_INJECT_STATUS:
2428 case KVM_CAP_IOEVENTFD:
2429 case KVM_CAP_IOEVENTFD_NO_LENGTH:
2430 case KVM_CAP_PIT2:
2431 case KVM_CAP_PIT_STATE2:
2432 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2433 case KVM_CAP_XEN_HVM:
2434 case KVM_CAP_ADJUST_CLOCK:
2435 case KVM_CAP_VCPU_EVENTS:
2436 case KVM_CAP_HYPERV:
2437 case KVM_CAP_HYPERV_VAPIC:
2438 case KVM_CAP_HYPERV_SPIN:
2439 case KVM_CAP_PCI_SEGMENT:
2440 case KVM_CAP_DEBUGREGS:
2441 case KVM_CAP_X86_ROBUST_SINGLESTEP:
2442 case KVM_CAP_XSAVE:
2443 case KVM_CAP_ASYNC_PF:
2444 case KVM_CAP_GET_TSC_KHZ:
2445 case KVM_CAP_KVMCLOCK_CTRL:
2446 case KVM_CAP_READONLY_MEM:
2447 case KVM_CAP_HYPERV_TIME:
2448 case KVM_CAP_IOAPIC_POLARITY_IGNORED:
2449 case KVM_CAP_TSC_DEADLINE_TIMER:
2450 case KVM_CAP_ENABLE_CAP_VM:
2451 case KVM_CAP_DISABLE_QUIRKS:
2452 case KVM_CAP_SET_BOOT_CPU_ID:
2453 case KVM_CAP_SPLIT_IRQCHIP:
2454 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2455 case KVM_CAP_ASSIGN_DEV_IRQ:
2456 case KVM_CAP_PCI_2_3:
2457 #endif
2458 r = 1;
2459 break;
2460 case KVM_CAP_X86_SMM:
2461 /* SMBASE is usually relocated above 1M on modern chipsets,
2462 * and SMM handlers might indeed rely on 4G segment limits,
2463 * so do not report SMM to be available if real mode is
2464 * emulated via vm86 mode. Still, do not go to great lengths
2465 * to avoid userspace's usage of the feature, because it is a
2466 * fringe case that is not enabled except via specific settings
2467 * of the module parameters.
2468 */
2469 r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
2470 break;
2471 case KVM_CAP_COALESCED_MMIO:
2472 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
2473 break;
2474 case KVM_CAP_VAPIC:
2475 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2476 break;
2477 case KVM_CAP_NR_VCPUS:
2478 r = KVM_SOFT_MAX_VCPUS;
2479 break;
2480 case KVM_CAP_MAX_VCPUS:
2481 r = KVM_MAX_VCPUS;
2482 break;
2483 case KVM_CAP_NR_MEMSLOTS:
2484 r = KVM_USER_MEM_SLOTS;
2485 break;
2486 case KVM_CAP_PV_MMU: /* obsolete */
2487 r = 0;
2488 break;
2489 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2490 case KVM_CAP_IOMMU:
2491 r = iommu_present(&pci_bus_type);
2492 break;
2493 #endif
2494 case KVM_CAP_MCE:
2495 r = KVM_MAX_MCE_BANKS;
2496 break;
2497 case KVM_CAP_XCRS:
2498 r = cpu_has_xsave;
2499 break;
2500 case KVM_CAP_TSC_CONTROL:
2501 r = kvm_has_tsc_control;
2502 break;
2503 default:
2504 r = 0;
2505 break;
2506 }
2507 return r;
2508
2509 }
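
/*
 * Userspace probes the capabilities above with KVM_CHECK_EXTENSION on
 * the /dev/kvm fd, e.g.:
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS) > 0)
 *		...
 *
 * A return of 0 means "unsupported"; positive values carry the
 * capability-specific meaning produced by the switch above.
 */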
2510
2511 long kvm_arch_dev_ioctl(struct file *filp,
2512 unsigned int ioctl, unsigned long arg)
2513 {
2514 void __user *argp = (void __user *)arg;
2515 long r;
2516
2517 switch (ioctl) {
2518 case KVM_GET_MSR_INDEX_LIST: {
2519 struct kvm_msr_list __user *user_msr_list = argp;
2520 struct kvm_msr_list msr_list;
2521 unsigned n;
2522
2523 r = -EFAULT;
2524 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2525 goto out;
2526 n = msr_list.nmsrs;
2527 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
2528 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2529 goto out;
2530 r = -E2BIG;
2531 if (n < msr_list.nmsrs)
2532 goto out;
2533 r = -EFAULT;
2534 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2535 num_msrs_to_save * sizeof(u32)))
2536 goto out;
2537 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2538 &emulated_msrs,
2539 num_emulated_msrs * sizeof(u32)))
2540 goto out;
2541 r = 0;
2542 break;
2543 }
2544 case KVM_GET_SUPPORTED_CPUID:
2545 case KVM_GET_EMULATED_CPUID: {
2546 struct kvm_cpuid2 __user *cpuid_arg = argp;
2547 struct kvm_cpuid2 cpuid;
2548
2549 r = -EFAULT;
2550 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2551 goto out;
2552
2553 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
2554 ioctl);
2555 if (r)
2556 goto out;
2557
2558 r = -EFAULT;
2559 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2560 goto out;
2561 r = 0;
2562 break;
2563 }
2564 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2565 u64 mce_cap;
2566
2567 mce_cap = KVM_MCE_CAP_SUPPORTED;
2568 r = -EFAULT;
2569 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
2570 goto out;
2571 r = 0;
2572 break;
2573 }
2574 default:
2575 r = -EINVAL;
2576 }
2577 out:
2578 return r;
2579 }
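
/*
 * Note the KVM_GET_MSR_INDEX_LIST handshake above: the kernel writes
 * the real count back before checking the caller's count, so userspace
 * can size its buffer in two calls -- a sketch, assuming 'list' was
 * allocated by the caller:
 *
 *	list->nmsrs = 0;
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);	// fails with E2BIG
 *	list = realloc(list, sizeof(*list) +
 *			     list->nmsrs * sizeof(__u32));
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);	// fills indices[]
 */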
2580
2581 static void wbinvd_ipi(void *garbage)
2582 {
2583 wbinvd();
2584 }
2585
2586 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2587 {
2588 return kvm_arch_has_noncoherent_dma(vcpu->kvm);
2589 }
2590
2591 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2592 {
2593 /* The guest may execute WBINVD; handle that here */
2594 if (need_emulate_wbinvd(vcpu)) {
2595 if (kvm_x86_ops->has_wbinvd_exit())
2596 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2597 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2598 smp_call_function_single(vcpu->cpu,
2599 wbinvd_ipi, NULL, 1);
2600 }
2601
2602 kvm_x86_ops->vcpu_load(vcpu, cpu);
2603
2604 /* Apply any externally detected TSC adjustments (due to suspend) */
2605 if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
2606 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
2607 vcpu->arch.tsc_offset_adjustment = 0;
2608 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2609 }
2610
2611 if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2612 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2613 rdtsc() - vcpu->arch.last_host_tsc;
2614 if (tsc_delta < 0)
2615 mark_tsc_unstable("KVM discovered backwards TSC");
2616 if (check_tsc_unstable()) {
2617 u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
2618 vcpu->arch.last_guest_tsc);
2619 kvm_x86_ops->write_tsc_offset(vcpu, offset);
2620 vcpu->arch.tsc_catchup = 1;
2621 }
2622 /*
2623 * On a host with synchronized TSC, there is no need to update
2624 * kvmclock on vcpu->cpu migration
2625 */
2626 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
2627 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2628 if (vcpu->cpu != cpu)
2629 kvm_migrate_timers(vcpu);
2630 vcpu->cpu = cpu;
2631 }
2632
2633 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2634 }
2635
2636 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2637 {
2638 kvm_x86_ops->vcpu_put(vcpu);
2639 kvm_put_guest_fpu(vcpu);
2640 vcpu->arch.last_host_tsc = rdtsc();
2641 }
2642
2643 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2644 struct kvm_lapic_state *s)
2645 {
2646 kvm_x86_ops->sync_pir_to_irr(vcpu);
2647 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2648
2649 return 0;
2650 }
2651
2652 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2653 struct kvm_lapic_state *s)
2654 {
2655 kvm_apic_post_state_restore(vcpu, s);
2656 update_cr8_intercept(vcpu);
2657
2658 return 0;
2659 }
2660
2661 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2662 struct kvm_interrupt *irq)
2663 {
2664 if (irq->irq >= KVM_NR_INTERRUPTS)
2665 return -EINVAL;
2666
2667 if (!irqchip_in_kernel(vcpu->kvm)) {
2668 kvm_queue_interrupt(vcpu, irq->irq, false);
2669 kvm_make_request(KVM_REQ_EVENT, vcpu);
2670 return 0;
2671 }
2672
2673 /*
2674 * With in-kernel LAPIC, we only use this to inject EXTINT, so
2675 * fail for in-kernel 8259.
2676 */
2677 if (pic_in_kernel(vcpu->kvm))
2678 return -ENXIO;
2679
2680 if (vcpu->arch.pending_external_vector != -1)
2681 return -EEXIST;
2682
2683 vcpu->arch.pending_external_vector = irq->irq;
2684 return 0;
2685 }
2686
2687 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2688 {
2689 kvm_inject_nmi(vcpu);
2690
2691 return 0;
2692 }
2693
2694 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
2695 {
2696 kvm_make_request(KVM_REQ_SMI, vcpu);
2697
2698 return 0;
2699 }
2700
2701 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2702 struct kvm_tpr_access_ctl *tac)
2703 {
2704 if (tac->flags)
2705 return -EINVAL;
2706 vcpu->arch.tpr_access_reporting = !!tac->enabled;
2707 return 0;
2708 }
2709
2710 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2711 u64 mcg_cap)
2712 {
2713 int r;
2714 unsigned bank_num = mcg_cap & 0xff, bank;
2715
2716 r = -EINVAL;
2717 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2718 goto out;
2719 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2720 goto out;
2721 r = 0;
2722 vcpu->arch.mcg_cap = mcg_cap;
2723 /* Init IA32_MCG_CTL to all 1s */
2724 if (mcg_cap & MCG_CTL_P)
2725 vcpu->arch.mcg_ctl = ~(u64)0;
2726 /* Init IA32_MCi_CTL to all 1s */
2727 for (bank = 0; bank < bank_num; bank++)
2728 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2729 out:
2730 return r;
2731 }
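
/*
 * A sketch of the matching userspace setup (the bank count of 10 is
 * chosen for the example): query the supported bits from /dev/kvm,
 * then request them plus a bank count on the vcpu:
 *
 *	__u64 mcg_cap;
 *	ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mcg_cap);
 *	mcg_cap = (mcg_cap & (MCG_CTL_P | MCG_SER_P)) | 10;
 *	ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
 */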
2732
2733 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2734 struct kvm_x86_mce *mce)
2735 {
2736 u64 mcg_cap = vcpu->arch.mcg_cap;
2737 unsigned bank_num = mcg_cap & 0xff;
2738 u64 *banks = vcpu->arch.mce_banks;
2739
2740 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2741 return -EINVAL;
2742 /*
2743 * If IA32_MCG_CTL is not all 1s, uncorrected error
2744 * reporting is disabled.
2745 */
2746 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2747 vcpu->arch.mcg_ctl != ~(u64)0)
2748 return 0;
2749 banks += 4 * mce->bank;
2750 /*
2751 * If IA32_MCi_CTL is not all 1s, uncorrected error
2752 * reporting is disabled for the bank.
2753 */
2754 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2755 return 0;
2756 if (mce->status & MCI_STATUS_UC) {
2757 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2758 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2759 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2760 return 0;
2761 }
2762 if (banks[1] & MCI_STATUS_VAL)
2763 mce->status |= MCI_STATUS_OVER;
2764 banks[2] = mce->addr;
2765 banks[3] = mce->misc;
2766 vcpu->arch.mcg_status = mce->mcg_status;
2767 banks[1] = mce->status;
2768 kvm_queue_exception(vcpu, MC_VECTOR);
2769 } else if (!(banks[1] & MCI_STATUS_VAL)
2770 || !(banks[1] & MCI_STATUS_UC)) {
2771 if (banks[1] & MCI_STATUS_VAL)
2772 mce->status |= MCI_STATUS_OVER;
2773 banks[2] = mce->addr;
2774 banks[3] = mce->misc;
2775 banks[1] = mce->status;
2776 } else
2777 banks[1] |= MCI_STATUS_OVER;
2778 return 0;
2779 }
2780
2781 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2782 struct kvm_vcpu_events *events)
2783 {
2784 process_nmi(vcpu);
2785 events->exception.injected =
2786 vcpu->arch.exception.pending &&
2787 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2788 events->exception.nr = vcpu->arch.exception.nr;
2789 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2790 events->exception.pad = 0;
2791 events->exception.error_code = vcpu->arch.exception.error_code;
2792
2793 events->interrupt.injected =
2794 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2795 events->interrupt.nr = vcpu->arch.interrupt.nr;
2796 events->interrupt.soft = 0;
2797 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
2798
2799 events->nmi.injected = vcpu->arch.nmi_injected;
2800 events->nmi.pending = vcpu->arch.nmi_pending != 0;
2801 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2802 events->nmi.pad = 0;
2803
2804 events->sipi_vector = 0; /* never valid when reporting to user space */
2805
2806 events->smi.smm = is_smm(vcpu);
2807 events->smi.pending = vcpu->arch.smi_pending;
2808 events->smi.smm_inside_nmi =
2809 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
2810 events->smi.latched_init = kvm_lapic_latched_init(vcpu);
2811
2812 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2813 | KVM_VCPUEVENT_VALID_SHADOW
2814 | KVM_VCPUEVENT_VALID_SMM);
2815 memset(&events->reserved, 0, sizeof(events->reserved));
2816 }
2817
2818 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2819 struct kvm_vcpu_events *events)
2820 {
2821 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2822 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2823 | KVM_VCPUEVENT_VALID_SHADOW
2824 | KVM_VCPUEVENT_VALID_SMM))
2825 return -EINVAL;
2826
2827 process_nmi(vcpu);
2828 vcpu->arch.exception.pending = events->exception.injected;
2829 vcpu->arch.exception.nr = events->exception.nr;
2830 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2831 vcpu->arch.exception.error_code = events->exception.error_code;
2832
2833 vcpu->arch.interrupt.pending = events->interrupt.injected;
2834 vcpu->arch.interrupt.nr = events->interrupt.nr;
2835 vcpu->arch.interrupt.soft = events->interrupt.soft;
2836 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2837 kvm_x86_ops->set_interrupt_shadow(vcpu,
2838 events->interrupt.shadow);
2839
2840 vcpu->arch.nmi_injected = events->nmi.injected;
2841 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2842 vcpu->arch.nmi_pending = events->nmi.pending;
2843 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2844
2845 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
2846 kvm_vcpu_has_lapic(vcpu))
2847 vcpu->arch.apic->sipi_vector = events->sipi_vector;
2848
2849 if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
2850 if (events->smi.smm)
2851 vcpu->arch.hflags |= HF_SMM_MASK;
2852 else
2853 vcpu->arch.hflags &= ~HF_SMM_MASK;
2854 vcpu->arch.smi_pending = events->smi.pending;
2855 if (events->smi.smm_inside_nmi)
2856 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
2857 else
2858 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
2859 if (kvm_vcpu_has_lapic(vcpu)) {
2860 if (events->smi.latched_init)
2861 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
2862 else
2863 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
2864 }
2865 }
2866
2867 kvm_make_request(KVM_REQ_EVENT, vcpu);
2868
2869 return 0;
2870 }
2871
2872 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2873 struct kvm_debugregs *dbgregs)
2874 {
2875 unsigned long val;
2876
2877 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2878 kvm_get_dr(vcpu, 6, &val);
2879 dbgregs->dr6 = val;
2880 dbgregs->dr7 = vcpu->arch.dr7;
2881 dbgregs->flags = 0;
2882 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2883 }
2884
2885 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2886 struct kvm_debugregs *dbgregs)
2887 {
2888 if (dbgregs->flags)
2889 return -EINVAL;
2890
2891 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2892 kvm_update_dr0123(vcpu);
2893 vcpu->arch.dr6 = dbgregs->dr6;
2894 kvm_update_dr6(vcpu);
2895 vcpu->arch.dr7 = dbgregs->dr7;
2896 kvm_update_dr7(vcpu);
2897
2898 return 0;
2899 }
2900
2901 #define XSTATE_COMPACTION_ENABLED (1ULL << 63)
2902
2903 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
2904 {
2905 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
2906 u64 xstate_bv = xsave->header.xfeatures;
2907 u64 valid;
2908
2909 /*
2910 * Copy legacy XSAVE area, to avoid complications with CPUID
2911 * leaves 0 and 1 in the loop below.
2912 */
2913 memcpy(dest, xsave, XSAVE_HDR_OFFSET);
2914
2915 /* Set XSTATE_BV */
2916 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
2917
2918 /*
2919 * Copy each region from the possibly compacted offset to the
2920 * non-compacted offset.
2921 */
2922 valid = xstate_bv & ~XSTATE_FPSSE;
2923 while (valid) {
2924 u64 feature = valid & -valid;
2925 int index = fls64(feature) - 1;
2926 void *src = get_xsave_addr(xsave, feature);
2927
2928 if (src) {
2929 u32 size, offset, ecx, edx;
2930 cpuid_count(XSTATE_CPUID, index,
2931 &size, &offset, &ecx, &edx);
2932 memcpy(dest + offset, src, size);
2933 }
2934
2935 valid -= feature;
2936 }
2937 }
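
/*
 * The "valid & -valid" step above (and in load_xsave() below) isolates
 * the lowest set bit in two's complement: e.g. valid = 0b0110 yields
 * feature = 0b0010 and index = fls64(feature) - 1 = 1. Subtracting the
 * feature then advances the walk one XSAVE component at a time.
 */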
2938
2939 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
2940 {
2941 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
2942 u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
2943 u64 valid;
2944
2945 /*
2946 * Copy legacy XSAVE area, to avoid complications with CPUID
2947 * leaves 0 and 1 in the loop below.
2948 */
2949 memcpy(xsave, src, XSAVE_HDR_OFFSET);
2950
2951 /* Set XSTATE_BV and possibly XCOMP_BV. */
2952 xsave->header.xfeatures = xstate_bv;
2953 if (cpu_has_xsaves)
2954 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
2955
2956 /*
2957 * Copy each region from the non-compacted offset to the
2958 * possibly compacted offset.
2959 */
2960 valid = xstate_bv & ~XSTATE_FPSSE;
2961 while (valid) {
2962 u64 feature = valid & -valid;
2963 int index = fls64(feature) - 1;
2964 void *dest = get_xsave_addr(xsave, feature);
2965
2966 if (dest) {
2967 u32 size, offset, ecx, edx;
2968 cpuid_count(XSTATE_CPUID, index,
2969 &size, &offset, &ecx, &edx);
2970 memcpy(dest, src + offset, size);
2971 }
2972
2973 valid -= feature;
2974 }
2975 }
2976
2977 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2978 struct kvm_xsave *guest_xsave)
2979 {
2980 if (cpu_has_xsave) {
2981 memset(guest_xsave, 0, sizeof(struct kvm_xsave));
2982 fill_xsave((u8 *) guest_xsave->region, vcpu);
2983 } else {
2984 memcpy(guest_xsave->region,
2985 &vcpu->arch.guest_fpu.state.fxsave,
2986 sizeof(struct fxregs_state));
2987 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2988 XSTATE_FPSSE;
2989 }
2990 }
2991
2992 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2993 struct kvm_xsave *guest_xsave)
2994 {
2995 u64 xstate_bv =
2996 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2997
2998 if (cpu_has_xsave) {
2999 /*
3000 * Here we allow setting states that are not present in
3001 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
3002 * with old userspace.
3003 */
3004 if (xstate_bv & ~kvm_supported_xcr0())
3005 return -EINVAL;
3006 load_xsave(vcpu, (u8 *)guest_xsave->region);
3007 } else {
3008 if (xstate_bv & ~XSTATE_FPSSE)
3009 return -EINVAL;
3010 memcpy(&vcpu->arch.guest_fpu.state.fxsave,
3011 guest_xsave->region, sizeof(struct fxregs_state));
3012 }
3013 return 0;
3014 }
3015
3016 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
3017 struct kvm_xcrs *guest_xcrs)
3018 {
3019 if (!cpu_has_xsave) {
3020 guest_xcrs->nr_xcrs = 0;
3021 return;
3022 }
3023
3024 guest_xcrs->nr_xcrs = 1;
3025 guest_xcrs->flags = 0;
3026 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
3027 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
3028 }
3029
3030 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
3031 struct kvm_xcrs *guest_xcrs)
3032 {
3033 int i, r = 0;
3034
3035 if (!cpu_has_xsave)
3036 return -EINVAL;
3037
3038 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
3039 return -EINVAL;
3040
3041 for (i = 0; i < guest_xcrs->nr_xcrs; i++)
3042 /* Only support XCR0 currently */
3043 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
3044 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
3045 guest_xcrs->xcrs[i].value);
3046 break;
3047 }
3048 if (r)
3049 r = -EINVAL;
3050 return r;
3051 }
3052
3053 /*
3054 * kvm_set_guest_paused() indicates to the guest kernel that it has been
3055 * stopped by the hypervisor. This function will be called from the host only.
3056 * EINVAL is returned when the host attempts to set the flag for a guest that
3057 * does not support pv clocks.
3058 */
3059 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
3060 {
3061 if (!vcpu->arch.pv_time_enabled)
3062 return -EINVAL;
3063 vcpu->arch.pvclock_set_guest_stopped_request = true;
3064 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3065 return 0;
3066 }
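
/*
 * Userspace typically issues KVM_KVMCLOCK_CTRL on every vcpu right
 * after resuming a guest it had stopped (e.g. with SIGSTOP or during
 * migration), so the guest's soft-lockup detector attributes the lost
 * time to the hypervisor instead of flagging stuck CPUs.
 */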
3067
3068 long kvm_arch_vcpu_ioctl(struct file *filp,
3069 unsigned int ioctl, unsigned long arg)
3070 {
3071 struct kvm_vcpu *vcpu = filp->private_data;
3072 void __user *argp = (void __user *)arg;
3073 int r;
3074 union {
3075 struct kvm_lapic_state *lapic;
3076 struct kvm_xsave *xsave;
3077 struct kvm_xcrs *xcrs;
3078 void *buffer;
3079 } u;
3080
3081 u.buffer = NULL;
3082 switch (ioctl) {
3083 case KVM_GET_LAPIC: {
3084 r = -EINVAL;
3085 if (!vcpu->arch.apic)
3086 goto out;
3087 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3088
3089 r = -ENOMEM;
3090 if (!u.lapic)
3091 goto out;
3092 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
3093 if (r)
3094 goto out;
3095 r = -EFAULT;
3096 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
3097 goto out;
3098 r = 0;
3099 break;
3100 }
3101 case KVM_SET_LAPIC: {
3102 r = -EINVAL;
3103 if (!vcpu->arch.apic)
3104 goto out;
3105 u.lapic = memdup_user(argp, sizeof(*u.lapic));
3106 if (IS_ERR(u.lapic))
3107 return PTR_ERR(u.lapic);
3108
3109 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
3110 break;
3111 }
3112 case KVM_INTERRUPT: {
3113 struct kvm_interrupt irq;
3114
3115 r = -EFAULT;
3116 if (copy_from_user(&irq, argp, sizeof irq))
3117 goto out;
3118 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
3119 break;
3120 }
3121 case KVM_NMI: {
3122 r = kvm_vcpu_ioctl_nmi(vcpu);
3123 break;
3124 }
3125 case KVM_SMI: {
3126 r = kvm_vcpu_ioctl_smi(vcpu);
3127 break;
3128 }
3129 case KVM_SET_CPUID: {
3130 struct kvm_cpuid __user *cpuid_arg = argp;
3131 struct kvm_cpuid cpuid;
3132
3133 r = -EFAULT;
3134 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3135 goto out;
3136 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
3137 break;
3138 }
3139 case KVM_SET_CPUID2: {
3140 struct kvm_cpuid2 __user *cpuid_arg = argp;
3141 struct kvm_cpuid2 cpuid;
3142
3143 r = -EFAULT;
3144 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3145 goto out;
3146 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
3147 cpuid_arg->entries);
3148 break;
3149 }
3150 case KVM_GET_CPUID2: {
3151 struct kvm_cpuid2 __user *cpuid_arg = argp;
3152 struct kvm_cpuid2 cpuid;
3153
3154 r = -EFAULT;
3155 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3156 goto out;
3157 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
3158 cpuid_arg->entries);
3159 if (r)
3160 goto out;
3161 r = -EFAULT;
3162 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
3163 goto out;
3164 r = 0;
3165 break;
3166 }
3167 case KVM_GET_MSRS:
3168 r = msr_io(vcpu, argp, do_get_msr, 1);
3169 break;
3170 case KVM_SET_MSRS:
3171 r = msr_io(vcpu, argp, do_set_msr, 0);
3172 break;
3173 case KVM_TPR_ACCESS_REPORTING: {
3174 struct kvm_tpr_access_ctl tac;
3175
3176 r = -EFAULT;
3177 if (copy_from_user(&tac, argp, sizeof tac))
3178 goto out;
3179 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
3180 if (r)
3181 goto out;
3182 r = -EFAULT;
3183 if (copy_to_user(argp, &tac, sizeof tac))
3184 goto out;
3185 r = 0;
3186 break;
3187 }
3188 case KVM_SET_VAPIC_ADDR: {
3189 struct kvm_vapic_addr va;
3190
3191 r = -EINVAL;
3192 if (!lapic_in_kernel(vcpu))
3193 goto out;
3194 r = -EFAULT;
3195 if (copy_from_user(&va, argp, sizeof va))
3196 goto out;
3197 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3198 break;
3199 }
3200 case KVM_X86_SETUP_MCE: {
3201 u64 mcg_cap;
3202
3203 r = -EFAULT;
3204 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
3205 goto out;
3206 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
3207 break;
3208 }
3209 case KVM_X86_SET_MCE: {
3210 struct kvm_x86_mce mce;
3211
3212 r = -EFAULT;
3213 if (copy_from_user(&mce, argp, sizeof mce))
3214 goto out;
3215 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
3216 break;
3217 }
3218 case KVM_GET_VCPU_EVENTS: {
3219 struct kvm_vcpu_events events;
3220
3221 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
3222
3223 r = -EFAULT;
3224 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
3225 break;
3226 r = 0;
3227 break;
3228 }
3229 case KVM_SET_VCPU_EVENTS: {
3230 struct kvm_vcpu_events events;
3231
3232 r = -EFAULT;
3233 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
3234 break;
3235
3236 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
3237 break;
3238 }
3239 case KVM_GET_DEBUGREGS: {
3240 struct kvm_debugregs dbgregs;
3241
3242 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
3243
3244 r = -EFAULT;
3245 if (copy_to_user(argp, &dbgregs,
3246 sizeof(struct kvm_debugregs)))
3247 break;
3248 r = 0;
3249 break;
3250 }
3251 case KVM_SET_DEBUGREGS: {
3252 struct kvm_debugregs dbgregs;
3253
3254 r = -EFAULT;
3255 if (copy_from_user(&dbgregs, argp,
3256 sizeof(struct kvm_debugregs)))
3257 break;
3258
3259 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
3260 break;
3261 }
3262 case KVM_GET_XSAVE: {
3263 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3264 r = -ENOMEM;
3265 if (!u.xsave)
3266 break;
3267
3268 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
3269
3270 r = -EFAULT;
3271 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
3272 break;
3273 r = 0;
3274 break;
3275 }
3276 case KVM_SET_XSAVE: {
3277 u.xsave = memdup_user(argp, sizeof(*u.xsave));
3278 if (IS_ERR(u.xsave))
3279 return PTR_ERR(u.xsave);
3280
3281 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
3282 break;
3283 }
3284 case KVM_GET_XCRS: {
3285 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3286 r = -ENOMEM;
3287 if (!u.xcrs)
3288 break;
3289
3290 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
3291
3292 r = -EFAULT;
3293 if (copy_to_user(argp, u.xcrs,
3294 sizeof(struct kvm_xcrs)))
3295 break;
3296 r = 0;
3297 break;
3298 }
3299 case KVM_SET_XCRS: {
3300 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
3301 if (IS_ERR(u.xcrs))
3302 return PTR_ERR(u.xcrs);
3303
3304 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
3305 break;
3306 }
3307 case KVM_SET_TSC_KHZ: {
3308 u32 user_tsc_khz;
3309
3310 r = -EINVAL;
3311 user_tsc_khz = (u32)arg;
3312
3313 if (user_tsc_khz >= kvm_max_guest_tsc_khz)
3314 goto out;
3315
3316 if (user_tsc_khz == 0)
3317 user_tsc_khz = tsc_khz;
3318
3319 kvm_set_tsc_khz(vcpu, user_tsc_khz);
3320
3321 r = 0;
3322 goto out;
3323 }
3324 case KVM_GET_TSC_KHZ: {
3325 r = vcpu->arch.virtual_tsc_khz;
3326 goto out;
3327 }
3328 case KVM_KVMCLOCK_CTRL: {
3329 r = kvm_set_guest_paused(vcpu);
3330 goto out;
3331 }
3332 default:
3333 r = -EINVAL;
3334 }
3335 out:
3336 kfree(u.buffer);
3337 return r;
3338 }
3339
3340 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3341 {
3342 return VM_FAULT_SIGBUS;
3343 }
3344
3345 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
3346 {
3347 int ret;
3348
3349 if (addr > (unsigned int)(-3 * PAGE_SIZE))
3350 return -EINVAL;
3351 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
3352 return ret;
3353 }
3354
3355 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
3356 u64 ident_addr)
3357 {
3358 kvm->arch.ept_identity_map_addr = ident_addr;
3359 return 0;
3360 }
3361
3362 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
3363 u32 kvm_nr_mmu_pages)
3364 {
3365 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
3366 return -EINVAL;
3367
3368 mutex_lock(&kvm->slots_lock);
3369
3370 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
3371 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
3372
3373 mutex_unlock(&kvm->slots_lock);
3374 return 0;
3375 }
3376
3377 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
3378 {
3379 return kvm->arch.n_max_mmu_pages;
3380 }
3381
3382 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3383 {
3384 int r;
3385
3386 r = 0;
3387 switch (chip->chip_id) {
3388 case KVM_IRQCHIP_PIC_MASTER:
3389 memcpy(&chip->chip.pic,
3390 &pic_irqchip(kvm)->pics[0],
3391 sizeof(struct kvm_pic_state));
3392 break;
3393 case KVM_IRQCHIP_PIC_SLAVE:
3394 memcpy(&chip->chip.pic,
3395 &pic_irqchip(kvm)->pics[1],
3396 sizeof(struct kvm_pic_state));
3397 break;
3398 case KVM_IRQCHIP_IOAPIC:
3399 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
3400 break;
3401 default:
3402 r = -EINVAL;
3403 break;
3404 }
3405 return r;
3406 }
3407
3408 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3409 {
3410 int r;
3411
3412 r = 0;
3413 switch (chip->chip_id) {
3414 case KVM_IRQCHIP_PIC_MASTER:
3415 spin_lock(&pic_irqchip(kvm)->lock);
3416 memcpy(&pic_irqchip(kvm)->pics[0],
3417 &chip->chip.pic,
3418 sizeof(struct kvm_pic_state));
3419 spin_unlock(&pic_irqchip(kvm)->lock);
3420 break;
3421 case KVM_IRQCHIP_PIC_SLAVE:
3422 spin_lock(&pic_irqchip(kvm)->lock);
3423 memcpy(&pic_irqchip(kvm)->pics[1],
3424 &chip->chip.pic,
3425 sizeof(struct kvm_pic_state));
3426 spin_unlock(&pic_irqchip(kvm)->lock);
3427 break;
3428 case KVM_IRQCHIP_IOAPIC:
3429 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
3430 break;
3431 default:
3432 r = -EINVAL;
3433 break;
3434 }
3435 kvm_pic_update_irq(pic_irqchip(kvm));
3436 return r;
3437 }
3438
3439 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3440 {
3441 int r = 0;
3442
3443 mutex_lock(&kvm->arch.vpit->pit_state.lock);
3444 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
3445 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3446 return r;
3447 }
3448
3449 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3450 {
3451 int r = 0;
3452
3453 mutex_lock(&kvm->arch.vpit->pit_state.lock);
3454 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3455 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
3456 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3457 return r;
3458 }
3459
3460 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3461 {
3462 int r = 0;
3463
3464 mutex_lock(&kvm->arch.vpit->pit_state.lock);
3465 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3466 sizeof(ps->channels));
3467 ps->flags = kvm->arch.vpit->pit_state.flags;
3468 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3469 memset(&ps->reserved, 0, sizeof(ps->reserved));
3470 return r;
3471 }
3472
3473 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3474 {
3475 int r = 0, start = 0;
3476 u32 prev_legacy, cur_legacy;
3477 mutex_lock(&kvm->arch.vpit->pit_state.lock);
3478 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3479 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3480 if (!prev_legacy && cur_legacy)
3481 start = 1;
3482 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3483 sizeof(kvm->arch.vpit->pit_state.channels));
3484 kvm->arch.vpit->pit_state.flags = ps->flags;
3485 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
3486 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3487 return r;
3488 }
3489
3490 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3491 struct kvm_reinject_control *control)
3492 {
3493 if (!kvm->arch.vpit)
3494 return -ENXIO;
3495 mutex_lock(&kvm->arch.vpit->pit_state.lock);
3496 kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
3497 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3498 return 0;
3499 }
3500
3501 /**
3502 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
3503 * @kvm: kvm instance
3504 * @log: slot id and address to which we copy the log
3505 *
3506 * Steps 1-4 below provide a general overview of dirty page logging. See
3507 * the kvm_get_dirty_log_protect() function description for additional details.
3508 *
3509 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
3510 * always flush the TLB (step 4), even if a previous step failed and the dirty
3511 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
3512 * API does not preclude a subsequent dirty log read by user space. Flushing
3513 * the TLB ensures writes will be marked dirty before the next log read.
3514 *
3515 * 1. Take a snapshot of the bit and clear it if needed.
3516 * 2. Write protect the corresponding page.
3517 * 3. Copy the snapshot to userspace.
3518 * 4. Flush TLBs if needed.
3519 */
3520 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
3521 {
3522 bool is_dirty = false;
3523 int r;
3524
3525 mutex_lock(&kvm->slots_lock);
3526
3527 /*
3528 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
3529 */
3530 if (kvm_x86_ops->flush_log_dirty)
3531 kvm_x86_ops->flush_log_dirty(kvm);
3532
3533 r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
3534
3535 /*
3536 * All the TLBs can be flushed out of mmu lock, see the comments in
3537 * kvm_mmu_slot_remove_write_access().
3538 */
3539 lockdep_assert_held(&kvm->slots_lock);
3540 if (is_dirty)
3541 kvm_flush_remote_tlbs(kvm);
3542
3543 mutex_unlock(&kvm->slots_lock);
3544 return r;
3545 }
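
/*
 * A minimal userspace sketch of the ioctl handled above; 'slot' and
 * 'npages' are assumed to describe an existing dirty-logging memslot:
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot,
 *		.dirty_bitmap = calloc((npages + 63) / 64, 8),
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *	// each set bit marks a page written since the previous call
 */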
3546
3547 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
3548 bool line_status)
3549 {
3550 if (!irqchip_in_kernel(kvm))
3551 return -ENXIO;
3552
3553 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3554 irq_event->irq, irq_event->level,
3555 line_status);
3556 return 0;
3557 }
3558
3559 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
3560 struct kvm_enable_cap *cap)
3561 {
3562 int r;
3563
3564 if (cap->flags)
3565 return -EINVAL;
3566
3567 switch (cap->cap) {
3568 case KVM_CAP_DISABLE_QUIRKS:
3569 kvm->arch.disabled_quirks = cap->args[0];
3570 r = 0;
3571 break;
3572 case KVM_CAP_SPLIT_IRQCHIP: {
3573 mutex_lock(&kvm->lock);
3574 r = -EINVAL;
3575 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
3576 goto split_irqchip_unlock;
3577 r = -EEXIST;
3578 if (irqchip_in_kernel(kvm))
3579 goto split_irqchip_unlock;
3580 if (atomic_read(&kvm->online_vcpus))
3581 goto split_irqchip_unlock;
3582 r = kvm_setup_empty_irq_routing(kvm);
3583 if (r)
3584 goto split_irqchip_unlock;
3585 /* Pairs with irqchip_in_kernel. */
3586 smp_wmb();
3587 kvm->arch.irqchip_split = true;
3588 kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
3589 r = 0;
3590 split_irqchip_unlock:
3591 mutex_unlock(&kvm->lock);
3592 break;
3593 }
3594 default:
3595 r = -EINVAL;
3596 break;
3597 }
3598 return r;
3599 }
3600
3601 long kvm_arch_vm_ioctl(struct file *filp,
3602 unsigned int ioctl, unsigned long arg)
3603 {
3604 struct kvm *kvm = filp->private_data;
3605 void __user *argp = (void __user *)arg;
3606 int r = -ENOTTY;
3607 /*
3608 * This union makes it completely explicit to gcc-3.x
3609 * that these variables' stack usage should be
3610 * combined, not added together.
3611 */
3612 union {
3613 struct kvm_pit_state ps;
3614 struct kvm_pit_state2 ps2;
3615 struct kvm_pit_config pit_config;
3616 } u;
3617
3618 switch (ioctl) {
3619 case KVM_SET_TSS_ADDR:
3620 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3621 break;
3622 case KVM_SET_IDENTITY_MAP_ADDR: {
3623 u64 ident_addr;
3624
3625 r = -EFAULT;
3626 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3627 goto out;
3628 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3629 break;
3630 }
3631 case KVM_SET_NR_MMU_PAGES:
3632 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3633 break;
3634 case KVM_GET_NR_MMU_PAGES:
3635 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3636 break;
3637 case KVM_CREATE_IRQCHIP: {
3638 struct kvm_pic *vpic;
3639
3640 mutex_lock(&kvm->lock);
3641 r = -EEXIST;
3642 if (kvm->arch.vpic)
3643 goto create_irqchip_unlock;
3644 r = -EINVAL;
3645 if (atomic_read(&kvm->online_vcpus))
3646 goto create_irqchip_unlock;
3647 r = -ENOMEM;
3648 vpic = kvm_create_pic(kvm);
3649 if (vpic) {
3650 r = kvm_ioapic_init(kvm);
3651 if (r) {
3652 mutex_lock(&kvm->slots_lock);
3653 kvm_destroy_pic(vpic);
3654 mutex_unlock(&kvm->slots_lock);
3655 goto create_irqchip_unlock;
3656 }
3657 } else
3658 goto create_irqchip_unlock;
3659 r = kvm_setup_default_irq_routing(kvm);
3660 if (r) {
3661 mutex_lock(&kvm->slots_lock);
3662 mutex_lock(&kvm->irq_lock);
3663 kvm_ioapic_destroy(kvm);
3664 kvm_destroy_pic(vpic);
3665 mutex_unlock(&kvm->irq_lock);
3666 mutex_unlock(&kvm->slots_lock);
3667 goto create_irqchip_unlock;
3668 }
3669 /* Write kvm->irq_routing before kvm->arch.vpic. */
3670 smp_wmb();
3671 kvm->arch.vpic = vpic;
3672 create_irqchip_unlock:
3673 mutex_unlock(&kvm->lock);
3674 break;
3675 }
3676 case KVM_CREATE_PIT:
3677 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3678 goto create_pit;
3679 case KVM_CREATE_PIT2:
3680 r = -EFAULT;
3681 if (copy_from_user(&u.pit_config, argp,
3682 sizeof(struct kvm_pit_config)))
3683 goto out;
3684 create_pit:
3685 mutex_lock(&kvm->slots_lock);
3686 r = -EEXIST;
3687 if (kvm->arch.vpit)
3688 goto create_pit_unlock;
3689 r = -ENOMEM;
3690 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3691 if (kvm->arch.vpit)
3692 r = 0;
3693 create_pit_unlock:
3694 mutex_unlock(&kvm->slots_lock);
3695 break;
3696 case KVM_GET_IRQCHIP: {
3697 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3698 struct kvm_irqchip *chip;
3699
3700 chip = memdup_user(argp, sizeof(*chip));
3701 if (IS_ERR(chip)) {
3702 r = PTR_ERR(chip);
3703 goto out;
3704 }
3705
3706 r = -ENXIO;
3707 if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
3708 goto get_irqchip_out;
3709 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3710 if (r)
3711 goto get_irqchip_out;
3712 r = -EFAULT;
3713 if (copy_to_user(argp, chip, sizeof *chip))
3714 goto get_irqchip_out;
3715 r = 0;
3716 get_irqchip_out:
3717 kfree(chip);
3718 break;
3719 }
3720 case KVM_SET_IRQCHIP: {
3721 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3722 struct kvm_irqchip *chip;
3723
3724 chip = memdup_user(argp, sizeof(*chip));
3725 if (IS_ERR(chip)) {
3726 r = PTR_ERR(chip);
3727 goto out;
3728 }
3729
3730 r = -ENXIO;
3731 if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
3732 goto set_irqchip_out;
3733 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3734 if (r)
3735 goto set_irqchip_out;
3736 r = 0;
3737 set_irqchip_out:
3738 kfree(chip);
3739 break;
3740 }
3741 case KVM_GET_PIT: {
3742 r = -EFAULT;
3743 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3744 goto out;
3745 r = -ENXIO;
3746 if (!kvm->arch.vpit)
3747 goto out;
3748 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3749 if (r)
3750 goto out;
3751 r = -EFAULT;
3752 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3753 goto out;
3754 r = 0;
3755 break;
3756 }
3757 case KVM_SET_PIT: {
3758 r = -EFAULT;
3759 if (copy_from_user(&u.ps, argp, sizeof u.ps))
3760 goto out;
3761 r = -ENXIO;
3762 if (!kvm->arch.vpit)
3763 goto out;
3764 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3765 break;
3766 }
3767 case KVM_GET_PIT2: {
3768 r = -ENXIO;
3769 if (!kvm->arch.vpit)
3770 goto out;
3771 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3772 if (r)
3773 goto out;
3774 r = -EFAULT;
3775 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3776 goto out;
3777 r = 0;
3778 break;
3779 }
3780 case KVM_SET_PIT2: {
3781 r = -EFAULT;
3782 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3783 goto out;
3784 r = -ENXIO;
3785 if (!kvm->arch.vpit)
3786 goto out;
3787 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3788 break;
3789 }
3790 case KVM_REINJECT_CONTROL: {
3791 struct kvm_reinject_control control;
3792 r = -EFAULT;
3793 if (copy_from_user(&control, argp, sizeof(control)))
3794 goto out;
3795 r = kvm_vm_ioctl_reinject(kvm, &control);
3796 break;
3797 }
3798 case KVM_SET_BOOT_CPU_ID:
3799 r = 0;
3800 mutex_lock(&kvm->lock);
3801 if (atomic_read(&kvm->online_vcpus) != 0)
3802 r = -EBUSY;
3803 else
3804 kvm->arch.bsp_vcpu_id = arg;
3805 mutex_unlock(&kvm->lock);
3806 break;
3807 case KVM_XEN_HVM_CONFIG: {
3808 r = -EFAULT;
3809 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3810 sizeof(struct kvm_xen_hvm_config)))
3811 goto out;
3812 r = -EINVAL;
3813 if (kvm->arch.xen_hvm_config.flags)
3814 goto out;
3815 r = 0;
3816 break;
3817 }
3818 case KVM_SET_CLOCK: {
3819 struct kvm_clock_data user_ns;
3820 u64 now_ns;
3821 s64 delta;
3822
3823 r = -EFAULT;
3824 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3825 goto out;
3826
3827 r = -EINVAL;
3828 if (user_ns.flags)
3829 goto out;
3830
3831 r = 0;
3832 local_irq_disable();
3833 now_ns = get_kernel_ns();
3834 delta = user_ns.clock - now_ns;
3835 local_irq_enable();
3836 kvm->arch.kvmclock_offset = delta;
3837 kvm_gen_update_masterclock(kvm);
3838 break;
3839 }
3840 case KVM_GET_CLOCK: {
3841 struct kvm_clock_data user_ns;
3842 u64 now_ns;
3843
3844 local_irq_disable();
3845 now_ns = get_kernel_ns();
3846 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3847 local_irq_enable();
3848 user_ns.flags = 0;
3849 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
3850
3851 r = -EFAULT;
3852 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3853 goto out;
3854 r = 0;
3855 break;
3856 }
3857 case KVM_ENABLE_CAP: {
3858 struct kvm_enable_cap cap;
3859
3860 r = -EFAULT;
3861 if (copy_from_user(&cap, argp, sizeof(cap)))
3862 goto out;
3863 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
3864 break;
3865 }
3866 default:
3867 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
3868 }
3869 out:
3870 return r;
3871 }
3872
3873 static void kvm_init_msr_list(void)
3874 {
3875 u32 dummy[2];
3876 unsigned i, j;
3877
3878 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
3879 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3880 continue;
3881
3882 /*
3883 * Even MSRs that are valid in the host may not be exposed
3884 * to the guests in some cases. We could work around this
3885 * in VMX with the generic MSR save/load machinery, but it
3886 * is not really worthwhile since it will only
3887 * happen with nested virtualization.
3888 */
3889 switch (msrs_to_save[i]) {
3890 case MSR_IA32_BNDCFGS:
3891 if (!kvm_x86_ops->mpx_supported())
3892 continue;
3893 break;
3894 default:
3895 break;
3896 }
3897
3898 if (j < i)
3899 msrs_to_save[j] = msrs_to_save[i];
3900 j++;
3901 }
3902 num_msrs_to_save = j;
3903
3904 for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
3905 switch (emulated_msrs[i]) {
3906 case MSR_IA32_SMBASE:
3907 if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
3908 continue;
3909 break;
3910 default:
3911 break;
3912 }
3913
3914 if (j < i)
3915 emulated_msrs[j] = emulated_msrs[i];
3916 j++;
3917 }
3918 num_emulated_msrs = j;
3919 }
3920
3921 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3922 const void *v)
3923 {
3924 int handled = 0;
3925 int n;
3926
3927 do {
3928 n = min(len, 8);
3929 if (!(vcpu->arch.apic &&
3930 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
3931 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
3932 break;
3933 handled += n;
3934 addr += n;
3935 len -= n;
3936 v += n;
3937 } while (len);
3938
3939 return handled;
3940 }
3941
3942 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3943 {
3944 int handled = 0;
3945 int n;
3946
3947 do {
3948 n = min(len, 8);
3949 if (!(vcpu->arch.apic &&
3950 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
3951 addr, n, v))
3952 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
3953 break;
3954 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
3955 handled += n;
3956 addr += n;
3957 len -= n;
3958 v += n;
3959 } while (len);
3960
3961 return handled;
3962 }
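
/*
 * Both MMIO helpers above split a request into bus transactions of at
 * most 8 bytes, trying the in-kernel local APIC device first and then
 * the general KVM_MMIO_BUS. "handled" counts how many bytes were
 * consumed before a device refused the access, so the caller knows
 * where emulation must resume via userspace.
 */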
3963
3964 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3965 struct kvm_segment *var, int seg)
3966 {
3967 kvm_x86_ops->set_segment(vcpu, var, seg);
3968 }
3969
3970 void kvm_get_segment(struct kvm_vcpu *vcpu,
3971 struct kvm_segment *var, int seg)
3972 {
3973 kvm_x86_ops->get_segment(vcpu, var, seg);
3974 }
3975
3976 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
3977 struct x86_exception *exception)
3978 {
3979 gpa_t t_gpa;
3980
3981 BUG_ON(!mmu_is_nested(vcpu));
3982
3983 /* NPT walks are always user-walks */
3984 access |= PFERR_USER_MASK;
3985 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
3986
3987 return t_gpa;
3988 }
3989
3990 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
3991 struct x86_exception *exception)
3992 {
3993 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3994 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3995 }
3996
3997 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
3998 struct x86_exception *exception)
3999 {
4000 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4001 access |= PFERR_FETCH_MASK;
4002 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4003 }
4004
4005 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
4006 struct x86_exception *exception)
4007 {
4008 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4009 access |= PFERR_WRITE_MASK;
4010 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4011 }
4012
4013 /* used to access any guest's mapped memory without checking CPL */
4014 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
4015 struct x86_exception *exception)
4016 {
4017 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
4018 }
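
/*
 * The accessors above compose the page-fault error-code bits for the
 * software MMU walk: for example, a write from CPL 3 walks with
 * PFERR_USER_MASK | PFERR_WRITE_MASK, while the _system variant passes
 * access = 0 so the user/supervisor check is skipped entirely.
 */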
4019
4020 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
4021 struct kvm_vcpu *vcpu, u32 access,
4022 struct x86_exception *exception)
4023 {
4024 void *data = val;
4025 int r = X86EMUL_CONTINUE;
4026
4027 while (bytes) {
4028 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
4029 exception);
4030 unsigned offset = addr & (PAGE_SIZE-1);
4031 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
4032 int ret;
4033
4034 if (gpa == UNMAPPED_GVA)
4035 return X86EMUL_PROPAGATE_FAULT;
4036 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
4037 offset, toread);
4038 if (ret < 0) {
4039 r = X86EMUL_IO_NEEDED;
4040 goto out;
4041 }
4042
4043 bytes -= toread;
4044 data += toread;
4045 addr += toread;
4046 }
4047 out:
4048 return r;
4049 }
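
/*
 * Worked example for the page-chunked copy above (illustrative values):
 * a 0x100-byte read at gva 0x1fc0 first translates with offset = 0xfc0
 * and toread = min(0x100, 0x1000 - 0xfc0) = 0x40, then loops with
 * addr = 0x2000 for the remaining 0xc0 bytes, so every iteration does
 * its own gva->gpa walk and never crosses a page boundary.
 */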
4050
4051 /* used for instruction fetching */
4052 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
4053 gva_t addr, void *val, unsigned int bytes,
4054 struct x86_exception *exception)
4055 {
4056 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4057 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4058 unsigned offset;
4059 int ret;
4060
4061 /* Inline kvm_read_guest_virt_helper for speed. */
4062 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
4063 exception);
4064 if (unlikely(gpa == UNMAPPED_GVA))
4065 return X86EMUL_PROPAGATE_FAULT;
4066
4067 offset = addr & (PAGE_SIZE-1);
4068 if (WARN_ON(offset + bytes > PAGE_SIZE))
4069 bytes = (unsigned)PAGE_SIZE - offset;
4070 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
4071 offset, bytes);
4072 if (unlikely(ret < 0))
4073 return X86EMUL_IO_NEEDED;
4074
4075 return X86EMUL_CONTINUE;
4076 }
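
/*
 * Note that, unlike the generic helper, this fast path handles a single
 * page only: the emulator's decoder is expected to split instruction
 * fetches at page boundaries, and the WARN_ON merely clamps the length
 * defensively if that assumption is ever violated.
 */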
4077
4078 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
4079 gva_t addr, void *val, unsigned int bytes,
4080 struct x86_exception *exception)
4081 {
4082 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4083 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4084
4085 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
4086 exception);
4087 }
4088 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
4089
4090 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4091 gva_t addr, void *val, unsigned int bytes,
4092 struct x86_exception *exception)
4093 {
4094 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4095 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
4096 }
4097
4098 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4099 gva_t addr, void *val,
4100 unsigned int bytes,
4101 struct x86_exception *exception)
4102 {
4103 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4104 void *data = val;
4105 int r = X86EMUL_CONTINUE;
4106
4107 while (bytes) {
4108 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
4109 PFERR_WRITE_MASK,
4110 exception);
4111 unsigned offset = addr & (PAGE_SIZE-1);
4112 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
4113 int ret;
4114
4115 if (gpa == UNMAPPED_GVA)
4116 return X86EMUL_PROPAGATE_FAULT;
4117 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
4118 if (ret < 0) {
4119 r = X86EMUL_IO_NEEDED;
4120 goto out;
4121 }
4122
4123 bytes -= towrite;
4124 data += towrite;
4125 addr += towrite;
4126 }
4127 out:
4128 return r;
4129 }
4130 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
4131
4132 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
4133 gpa_t *gpa, struct x86_exception *exception,
4134 bool write)
4135 {
4136 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
4137 | (write ? PFERR_WRITE_MASK : 0);
4138
4139 if (vcpu_match_mmio_gva(vcpu, gva)
4140 && !permission_fault(vcpu, vcpu->arch.walk_mmu,
4141 vcpu->arch.access, access)) {
4142 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
4143 (gva & (PAGE_SIZE - 1));
4144 trace_vcpu_match_mmio(gva, *gpa, write, false);
4145 return 1;
4146 }
4147
4148 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4149
4150 if (*gpa == UNMAPPED_GVA)
4151 return -1;
4152
4153 /* For APIC access vmexit */
4154 if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4155 return 1;
4156
4157 if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
4158 trace_vcpu_match_mmio(gva, *gpa, write, true);
4159 return 1;
4160 }
4161
4162 return 0;
4163 }
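
/*
 * Return convention for vcpu_mmio_gva_to_gpa(): -1 means the gva does
 * not translate (the caller propagates a fault), 1 means the gpa must
 * take the MMIO path (a cached MMIO match or the APIC access page),
 * and 0 means ordinary guest RAM that can be accessed directly.
 */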
4164
4165 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
4166 const void *val, int bytes)
4167 {
4168 int ret;
4169
4170 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
4171 if (ret < 0)
4172 return 0;
4173 kvm_mmu_pte_write(vcpu, gpa, val, bytes);
4174 return 1;
4175 }
4176
4177 struct read_write_emulator_ops {
4178 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
4179 int bytes);
4180 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
4181 void *val, int bytes);
4182 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4183 int bytes, void *val);
4184 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4185 void *val, int bytes);
4186 bool write;
4187 };
4188
4189 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
4190 {
4191 if (vcpu->mmio_read_completed) {
4192 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
4193 vcpu->mmio_fragments[0].gpa, *(u64 *)val);
4194 vcpu->mmio_read_completed = 0;
4195 return 1;
4196 }
4197
4198 return 0;
4199 }
4200
4201 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4202 void *val, int bytes)
4203 {
4204 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
4205 }
4206
4207 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4208 void *val, int bytes)
4209 {
4210 return emulator_write_phys(vcpu, gpa, val, bytes);
4211 }
4212
4213 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
4214 {
4215 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
4216 return vcpu_mmio_write(vcpu, gpa, bytes, val);
4217 }
4218
4219 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4220 void *val, int bytes)
4221 {
4222 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
4223 return X86EMUL_IO_NEEDED;
4224 }
4225
4226 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4227 void *val, int bytes)
4228 {
4229 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
4230
4231 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
4232 return X86EMUL_CONTINUE;
4233 }
4234
4235 static const struct read_write_emulator_ops read_emultor = {
4236 .read_write_prepare = read_prepare,
4237 .read_write_emulate = read_emulate,
4238 .read_write_mmio = vcpu_mmio_read,
4239 .read_write_exit_mmio = read_exit_mmio,
4240 };
4241
4242 static const struct read_write_emulator_ops write_emultor = {
4243 .read_write_emulate = write_emulate,
4244 .read_write_mmio = write_mmio,
4245 .read_write_exit_mmio = write_exit_mmio,
4246 .write = true,
4247 };
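
/*
 * read_emultor and write_emultor plug the direction-specific steps into
 * the shared emulator_read_write() path below: "read_write_emulate"
 * tries ordinary guest memory first, "read_write_mmio" dispatches to
 * in-kernel devices, and "read_write_exit_mmio" stages whatever is left
 * for a KVM_EXIT_MMIO round trip to userspace.
 */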
4248
4249 static int emulator_read_write_onepage(unsigned long addr, void *val,
4250 unsigned int bytes,
4251 struct x86_exception *exception,
4252 struct kvm_vcpu *vcpu,
4253 const struct read_write_emulator_ops *ops)
4254 {
4255 gpa_t gpa;
4256 int handled, ret;
4257 bool write = ops->write;
4258 struct kvm_mmio_fragment *frag;
4259
4260 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
4261
4262 if (ret < 0)
4263 return X86EMUL_PROPAGATE_FAULT;
4264
4265 /* For APIC access vmexit */
4266 if (ret)
4267 goto mmio;
4268
4269 if (ops->read_write_emulate(vcpu, gpa, val, bytes))
4270 return X86EMUL_CONTINUE;
4271
4272 mmio:
4273 /*
4274 * Is this MMIO handled locally?
4275 */
4276 handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
4277 if (handled == bytes)
4278 return X86EMUL_CONTINUE;
4279
4280 gpa += handled;
4281 bytes -= handled;
4282 val += handled;
4283
4284 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
4285 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
4286 frag->gpa = gpa;
4287 frag->data = val;
4288 frag->len = bytes;
4289 return X86EMUL_CONTINUE;
4290 }
4291
4292 static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
4293 unsigned long addr,
4294 void *val, unsigned int bytes,
4295 struct x86_exception *exception,
4296 const struct read_write_emulator_ops *ops)
4297 {
4298 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4299 gpa_t gpa;
4300 int rc;
4301
4302 if (ops->read_write_prepare &&
4303 ops->read_write_prepare(vcpu, val, bytes))
4304 return X86EMUL_CONTINUE;
4305
4306 vcpu->mmio_nr_fragments = 0;
4307
4308 /* Crossing a page boundary? */
4309 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
4310 int now;
4311
4312 now = -addr & ~PAGE_MASK;
4313 rc = emulator_read_write_onepage(addr, val, now, exception,
4314 vcpu, ops);
4315
4316 if (rc != X86EMUL_CONTINUE)
4317 return rc;
4318 addr += now;
4319 if (ctxt->mode != X86EMUL_MODE_PROT64)
4320 addr = (u32)addr;
4321 val += now;
4322 bytes -= now;
4323 }
4324
4325 rc = emulator_read_write_onepage(addr, val, bytes, exception,
4326 vcpu, ops);
4327 if (rc != X86EMUL_CONTINUE)
4328 return rc;
4329
4330 if (!vcpu->mmio_nr_fragments)
4331 return rc;
4332
4333 gpa = vcpu->mmio_fragments[0].gpa;
4334
4335 vcpu->mmio_needed = 1;
4336 vcpu->mmio_cur_fragment = 0;
4337
4338 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
4339 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
4340 vcpu->run->exit_reason = KVM_EXIT_MMIO;
4341 vcpu->run->mmio.phys_addr = gpa;
4342
4343 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
4344 }
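
/*
 * Worked example for the page-boundary split above: for a 4-byte access
 * at addr = 0x10ffe, ((addr + 3) ^ addr) & PAGE_MASK is non-zero and
 * now = -addr & ~PAGE_MASK = 2, so the access is emulated as 2 bytes at
 * 0x10ffe followed by 2 bytes at 0x11000; any MMIO fragments are then
 * drained through KVM_EXIT_MMIO one chunk of up to 8 bytes at a time.
 */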
4345
4346 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
4347 unsigned long addr,
4348 void *val,
4349 unsigned int bytes,
4350 struct x86_exception *exception)
4351 {
4352 return emulator_read_write(ctxt, addr, val, bytes,
4353 exception, &read_emultor);
4354 }
4355
4356 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
4357 unsigned long addr,
4358 const void *val,
4359 unsigned int bytes,
4360 struct x86_exception *exception)
4361 {
4362 return emulator_read_write(ctxt, addr, (void *)val, bytes,
4363 exception, &write_emultor);
4364 }
4365
4366 #define CMPXCHG_TYPE(t, ptr, old, new) \
4367 (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
4368
4369 #ifdef CONFIG_X86_64
4370 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
4371 #else
4372 # define CMPXCHG64(ptr, old, new) \
4373 (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
4374 #endif
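
/*
 * Example expansion (illustrative): for bytes == 2,
 * CMPXCHG_TYPE(u16, kaddr, old, new) becomes
 *
 *   cmpxchg((u16 *)kaddr, *(u16 *)old, *(u16 *)new) == *(u16 *)old
 *
 * i.e. "exchanged" below is true only if the guest value still matched
 * the expected old value at the moment the atomic compare-and-swap ran.
 */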
4375
4376 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
4377 unsigned long addr,
4378 const void *old,
4379 const void *new,
4380 unsigned int bytes,
4381 struct x86_exception *exception)
4382 {
4383 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4384 gpa_t gpa;
4385 struct page *page;
4386 char *kaddr;
4387 bool exchanged;
4388
4389 /* a guest's cmpxchg8b has to be emulated atomically */
4390 if (bytes > 8 || (bytes & (bytes - 1)))
4391 goto emul_write;
4392
4393 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
4394
4395 if (gpa == UNMAPPED_GVA ||
4396 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4397 goto emul_write;
4398
4399 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
4400 goto emul_write;
4401
4402 page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
4403 if (is_error_page(page))
4404 goto emul_write;
4405
4406 kaddr = kmap_atomic(page);
4407 kaddr += offset_in_page(gpa);
4408 switch (bytes) {
4409 case 1:
4410 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
4411 break;
4412 case 2:
4413 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
4414 break;
4415 case 4:
4416 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
4417 break;
4418 case 8:
4419 exchanged = CMPXCHG64(kaddr, old, new);
4420 break;
4421 default:
4422 BUG();
4423 }
4424 kunmap_atomic(kaddr);
4425 kvm_release_page_dirty(page);
4426
4427 if (!exchanged)
4428 return X86EMUL_CMPXCHG_FAILED;
4429
4430 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
4431 kvm_mmu_pte_write(vcpu, gpa, new, bytes);
4432
4433 return X86EMUL_CONTINUE;
4434
4435 emul_write:
4436 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
4437
4438 return emulator_write_emulated(ctxt, addr, new, bytes, exception);
4439 }
4440
4441 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
4442 {
4443 /* TODO: string I/O for in-kernel devices */
4444 int r;
4445
4446 if (vcpu->arch.pio.in)
4447 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
4448 vcpu->arch.pio.size, pd);
4449 else
4450 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
4451 vcpu->arch.pio.port, vcpu->arch.pio.size,
4452 pd);
4453 return r;
4454 }
4455
4456 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
4457 unsigned short port, void *val,
4458 unsigned int count, bool in)
4459 {
4460 vcpu->arch.pio.port = port;
4461 vcpu->arch.pio.in = in;
4462 vcpu->arch.pio.count = count;
4463 vcpu->arch.pio.size = size;
4464
4465 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
4466 vcpu->arch.pio.count = 0;
4467 return 1;
4468 }
4469
4470 vcpu->run->exit_reason = KVM_EXIT_IO;
4471 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
4472 vcpu->run->io.size = size;
4473 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
4474 vcpu->run->io.count = count;
4475 vcpu->run->io.port = port;
4476
4477 return 0;
4478 }
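
/*
 * Return convention: 1 means an in-kernel device claimed the port and
 * the transaction already completed; 0 means the run structure has been
 * filled in for a KVM_EXIT_IO exit, with the data staged in the shared
 * pio_data page at KVM_PIO_PAGE_OFFSET.
 */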
4479
4480 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
4481 int size, unsigned short port, void *val,
4482 unsigned int count)
4483 {
4484 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4485 int ret;
4486
4487 if (vcpu->arch.pio.count)
4488 goto data_avail;
4489
4490 ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
4491 if (ret) {
4492 data_avail:
4493 memcpy(val, vcpu->arch.pio_data, size * count);
4494 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
4495 vcpu->arch.pio.count = 0;
4496 return 1;
4497 }
4498
4499 return 0;
4500 }
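
/*
 * PIO-in is a two-phase protocol whenever userspace must service the
 * port: the first call returns 0 and exits to userspace, which deposits
 * the result in pio_data; on re-entry the emulator retries the same
 * instruction, finds vcpu->arch.pio.count non-zero, and takes the
 * data_avail path to complete the read without reissuing the I/O.
 */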
4501
4502 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
4503 int size, unsigned short port,
4504 const void *val, unsigned int count)
4505 {
4506 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4507
4508 memcpy(vcpu->arch.pio_data, val, size * count);
4509 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
4510 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
4511 }
4512
4513 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
4514 {
4515 return kvm_x86_ops->get_segment_base(vcpu, seg);
4516 }
4517
4518 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4519 {
4520 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4521 }
4522
4523 int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
4524 {
4525 if (!need_emulate_wbinvd(vcpu))
4526 return X86EMUL_CONTINUE;
4527
4528 if (kvm_x86_ops->has_wbinvd_exit()) {
4529 int cpu = get_cpu();
4530
4531 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4532 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
4533 wbinvd_ipi, NULL, 1);
4534 put_cpu();
4535 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4536 } else
4537 wbinvd();
4538 return X86EMUL_CONTINUE;
4539 }
4540
4541 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
4542 {
4543 kvm_x86_ops->skip_emulated_instruction(vcpu);
4544 return kvm_emulate_wbinvd_noskip(vcpu);
4545 }
4546 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
4547
4548
4549
4550 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
4551 {
4552 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
4553 }
4554
4555 static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
4556 unsigned long *dest)
4557 {
4558 return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4559 }
4560
4561 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
4562 unsigned long value)
4563 {
4564
4565 return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4566 }
4567
4568 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4569 {
4570 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4571 }
4572
4573 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4574 {
4575 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4576 unsigned long value;
4577
4578 switch (cr) {
4579 case 0:
4580 value = kvm_read_cr0(vcpu);
4581 break;
4582 case 2:
4583 value = vcpu->arch.cr2;
4584 break;
4585 case 3:
4586 value = kvm_read_cr3(vcpu);
4587 break;
4588 case 4:
4589 value = kvm_read_cr4(vcpu);
4590 break;
4591 case 8:
4592 value = kvm_get_cr8(vcpu);
4593 break;
4594 default:
4595 kvm_err("%s: unexpected cr %u\n", __func__, cr);
4596 return 0;
4597 }
4598
4599 return value;
4600 }
4601
4602 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4603 {
4604 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4605 int res = 0;
4606
4607 switch (cr) {
4608 case 0:
4609 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4610 break;
4611 case 2:
4612 vcpu->arch.cr2 = val;
4613 break;
4614 case 3:
4615 res = kvm_set_cr3(vcpu, val);
4616 break;
4617 case 4:
4618 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4619 break;
4620 case 8:
4621 res = kvm_set_cr8(vcpu, val);
4622 break;
4623 default:
4624 kvm_err("%s: unexpected cr %u\n", __func__, cr);
4625 res = -1;
4626 }
4627
4628 return res;
4629 }
4630
4631 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4632 {
4633 return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4634 }
4635
4636 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4637 {
4638 kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4639 }
4640
4641 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4642 {
4643 kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4644 }
4645
4646 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4647 {
4648 kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
4649 }
4650
4651 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4652 {
4653 kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
4654 }
4655
4656 static unsigned long emulator_get_cached_segment_base(
4657 struct x86_emulate_ctxt *ctxt, int seg)
4658 {
4659 return get_segment_base(emul_to_vcpu(ctxt), seg);
4660 }
4661
4662 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
4663 struct desc_struct *desc, u32 *base3,
4664 int seg)
4665 {
4666 struct kvm_segment var;
4667
4668 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
4669 *selector = var.selector;
4670
4671 if (var.unusable) {
4672 memset(desc, 0, sizeof(*desc));
4673 return false;
4674 }
4675
4676 if (var.g)
4677 var.limit >>= 12;
4678 set_desc_limit(desc, var.limit);
4679 set_desc_base(desc, (unsigned long)var.base);
4680 #ifdef CONFIG_X86_64
4681 if (base3)
4682 *base3 = var.base >> 32;
4683 #endif
4684 desc->type = var.type;
4685 desc->s = var.s;
4686 desc->dpl = var.dpl;
4687 desc->p = var.present;
4688 desc->avl = var.avl;
4689 desc->l = var.l;
4690 desc->d = var.db;
4691 desc->g = var.g;
4692
4693 return true;
4694 }
4695
4696 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
4697 struct desc_struct *desc, u32 base3,
4698 int seg)
4699 {
4700 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4701 struct kvm_segment var;
4702
4703 var.selector = selector;
4704 var.base = get_desc_base(desc);
4705 #ifdef CONFIG_X86_64
4706 var.base |= ((u64)base3) << 32;
4707 #endif
4708 var.limit = get_desc_limit(desc);
4709 if (desc->g)
4710 var.limit = (var.limit << 12) | 0xfff;
4711 var.type = desc->type;
4712 var.dpl = desc->dpl;
4713 var.db = desc->d;
4714 var.s = desc->s;
4715 var.l = desc->l;
4716 var.g = desc->g;
4717 var.avl = desc->avl;
4718 var.present = desc->p;
4719 var.unusable = !var.present;
4720 var.padding = 0;
4721
4722 kvm_set_segment(vcpu, &var, seg);
4723 return;
4724 }
4725
4726 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
4727 u32 msr_index, u64 *pdata)
4728 {
4729 struct msr_data msr;
4730 int r;
4731
4732 msr.index = msr_index;
4733 msr.host_initiated = false;
4734 r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
4735 if (r)
4736 return r;
4737
4738 *pdata = msr.data;
4739 return 0;
4740 }
4741
4742 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
4743 u32 msr_index, u64 data)
4744 {
4745 struct msr_data msr;
4746
4747 msr.data = data;
4748 msr.index = msr_index;
4749 msr.host_initiated = false;
4750 return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
4751 }
4752
4753 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
4754 {
4755 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4756
4757 return vcpu->arch.smbase;
4758 }
4759
4760 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
4761 {
4762 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4763
4764 vcpu->arch.smbase = smbase;
4765 }
4766
4767 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
4768 u32 pmc)
4769 {
4770 return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
4771 }
4772
4773 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
4774 u32 pmc, u64 *pdata)
4775 {
4776 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
4777 }
4778
4779 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
4780 {
4781 emul_to_vcpu(ctxt)->arch.halt_request = 1;
4782 }
4783
4784 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
4785 {
4786 preempt_disable();
4787 kvm_load_guest_fpu(emul_to_vcpu(ctxt));
4788 /*
4789 * CR0.TS may reference the host fpu state, not the guest fpu state,
4790 * so it may be clear at this point.
4791 */
4792 clts();
4793 }
4794
4795 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
4796 {
4797 preempt_enable();
4798 }
4799
4800 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
4801 struct x86_instruction_info *info,
4802 enum x86_intercept_stage stage)
4803 {
4804 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
4805 }
4806
4807 static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
4808 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
4809 {
4810 kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
4811 }
4812
4813 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
4814 {
4815 return kvm_register_read(emul_to_vcpu(ctxt), reg);
4816 }
4817
4818 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
4819 {
4820 kvm_register_write(emul_to_vcpu(ctxt), reg, val);
4821 }
4822
4823 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
4824 {
4825 kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
4826 }
4827
4828 static const struct x86_emulate_ops emulate_ops = {
4829 .read_gpr = emulator_read_gpr,
4830 .write_gpr = emulator_write_gpr,
4831 .read_std = kvm_read_guest_virt_system,
4832 .write_std = kvm_write_guest_virt_system,
4833 .fetch = kvm_fetch_guest_virt,
4834 .read_emulated = emulator_read_emulated,
4835 .write_emulated = emulator_write_emulated,
4836 .cmpxchg_emulated = emulator_cmpxchg_emulated,
4837 .invlpg = emulator_invlpg,
4838 .pio_in_emulated = emulator_pio_in_emulated,
4839 .pio_out_emulated = emulator_pio_out_emulated,
4840 .get_segment = emulator_get_segment,
4841 .set_segment = emulator_set_segment,
4842 .get_cached_segment_base = emulator_get_cached_segment_base,
4843 .get_gdt = emulator_get_gdt,
4844 .get_idt = emulator_get_idt,
4845 .set_gdt = emulator_set_gdt,
4846 .set_idt = emulator_set_idt,
4847 .get_cr = emulator_get_cr,
4848 .set_cr = emulator_set_cr,
4849 .cpl = emulator_get_cpl,
4850 .get_dr = emulator_get_dr,
4851 .set_dr = emulator_set_dr,
4852 .get_smbase = emulator_get_smbase,
4853 .set_smbase = emulator_set_smbase,
4854 .set_msr = emulator_set_msr,
4855 .get_msr = emulator_get_msr,
4856 .check_pmc = emulator_check_pmc,
4857 .read_pmc = emulator_read_pmc,
4858 .halt = emulator_halt,
4859 .wbinvd = emulator_wbinvd,
4860 .fix_hypercall = emulator_fix_hypercall,
4861 .get_fpu = emulator_get_fpu,
4862 .put_fpu = emulator_put_fpu,
4863 .intercept = emulator_intercept,
4864 .get_cpuid = emulator_get_cpuid,
4865 .set_nmi_mask = emulator_set_nmi_mask,
4866 };
4867
4868 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
4869 {
4870 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
4871 /*
4872 * An "sti; sti" sequence only disables interrupts for the first
4873 * instruction. So, if the last instruction, be it emulated or
4874 * not, left the system with the INT_STI flag enabled, it
4875 * means that the last instruction was an sti. We should not
4876 * leave the flag on in this case. The same goes for mov ss.
4877 */
4878 if (int_shadow & mask)
4879 mask = 0;
4880 if (unlikely(int_shadow || mask)) {
4881 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
4882 if (!mask)
4883 kvm_make_request(KVM_REQ_EVENT, vcpu);
4884 }
4885 }
4886
4887 static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
4888 {
4889 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4890 if (ctxt->exception.vector == PF_VECTOR)
4891 return kvm_propagate_fault(vcpu, &ctxt->exception);
4892
4893 if (ctxt->exception.error_code_valid)
4894 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
4895 ctxt->exception.error_code);
4896 else
4897 kvm_queue_exception(vcpu, ctxt->exception.vector);
4898 return false;
4899 }
4900
4901 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
4902 {
4903 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4904 int cs_db, cs_l;
4905
4906 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4907
4908 ctxt->eflags = kvm_get_rflags(vcpu);
4909 ctxt->eip = kvm_rip_read(vcpu);
4910 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
4911 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
4912 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 :
4913 cs_db ? X86EMUL_MODE_PROT32 :
4914 X86EMUL_MODE_PROT16;
4915 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
4916 BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
4917 BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
4918 ctxt->emul_flags = vcpu->arch.hflags;
4919
4920 init_decode_cache(ctxt);
4921 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4922 }
4923
4924 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
4925 {
4926 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4927 int ret;
4928
4929 init_emulate_ctxt(vcpu);
4930
4931 ctxt->op_bytes = 2;
4932 ctxt->ad_bytes = 2;
4933 ctxt->_eip = ctxt->eip + inc_eip;
4934 ret = emulate_int_real(ctxt, irq);
4935
4936 if (ret != X86EMUL_CONTINUE)
4937 return EMULATE_FAIL;
4938
4939 ctxt->eip = ctxt->_eip;
4940 kvm_rip_write(vcpu, ctxt->eip);
4941 kvm_set_rflags(vcpu, ctxt->eflags);
4942
4943 if (irq == NMI_VECTOR)
4944 vcpu->arch.nmi_pending = 0;
4945 else
4946 vcpu->arch.interrupt.pending = false;
4947
4948 return EMULATE_DONE;
4949 }
4950 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
4951
4952 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
4953 {
4954 int r = EMULATE_DONE;
4955
4956 ++vcpu->stat.insn_emulation_fail;
4957 trace_kvm_emulate_insn_failed(vcpu);
4958 if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
4959 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4960 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4961 vcpu->run->internal.ndata = 0;
4962 r = EMULATE_FAIL;
4963 }
4964 kvm_queue_exception(vcpu, UD_VECTOR);
4965
4966 return r;
4967 }
4968
4969 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
4970 bool write_fault_to_shadow_pgtable,
4971 int emulation_type)
4972 {
4973 gpa_t gpa = cr2;
4974 pfn_t pfn;
4975
4976 if (emulation_type & EMULTYPE_NO_REEXECUTE)
4977 return false;
4978
4979 if (!vcpu->arch.mmu.direct_map) {
4980 /*
4981 * Write permission should be allowed since only
4982 * write accesses need to be emulated.
4983 */
4984 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
4985
4986 /*
4987 * If the mapping is invalid in the guest, let the CPU retry
4988 * it to generate the fault.
4989 */
4990 if (gpa == UNMAPPED_GVA)
4991 return true;
4992 }
4993
4994 /*
4995 * Do not retry the unhandleable instruction if it faults on
4996 * read-only host memory; otherwise it will go into an infinite loop:
4997 * retry instruction -> write #PF -> emulation fail -> retry
4998 * instruction -> ...
4999 */
5000 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
5001
5002 /*
5003 * If the instruction failed on the error pfn, it cannot be fixed;
5004 * report the error to userspace.
5005 */
5006 if (is_error_noslot_pfn(pfn))
5007 return false;
5008
5009 kvm_release_pfn_clean(pfn);
5010
5011 /* The instructions are well-emulated on direct mmu. */
5012 if (vcpu->arch.mmu.direct_map) {
5013 unsigned int indirect_shadow_pages;
5014
5015 spin_lock(&vcpu->kvm->mmu_lock);
5016 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
5017 spin_unlock(&vcpu->kvm->mmu_lock);
5018
5019 if (indirect_shadow_pages)
5020 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5021
5022 return true;
5023 }
5024
5025 /*
5026 * If emulation was due to access to a shadowed page table
5027 * and it failed, try to unshadow the page and re-enter the
5028 * guest to let the CPU execute the instruction.
5029 */
5030 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5031
5032 /*
5033 * If the access faults on its page table, it cannot
5034 * be fixed by unprotecting the shadow page; it should
5035 * be reported to userspace.
5036 */
5037 return !write_fault_to_shadow_pgtable;
5038 }
5039
5040 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
5041 unsigned long cr2, int emulation_type)
5042 {
5043 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5044 unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
5045
5046 last_retry_eip = vcpu->arch.last_retry_eip;
5047 last_retry_addr = vcpu->arch.last_retry_addr;
5048
5049 /*
5050 * If the emulation is caused by #PF and it is a non-page-table-
5051 * writing instruction, the VM exit was caused by shadow-page
5052 * protection; we can zap the shadow page and retry the
5053 * instruction directly.
5054 *
5055 * Note: if the guest uses a non-page-table modifying instruction
5056 * on the PDE that points to the instruction, then we will unmap
5057 * the instruction and go to an infinite loop. So, we cache the
5058 * last retried eip and the last fault address, if we meet the eip
5059 * and the address again, we can break out of the potential infinite
5060 * loop.
5061 */
5062 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
5063
5064 if (!(emulation_type & EMULTYPE_RETRY))
5065 return false;
5066
5067 if (x86_page_table_writing_insn(ctxt))
5068 return false;
5069
5070 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
5071 return false;
5072
5073 vcpu->arch.last_retry_eip = ctxt->eip;
5074 vcpu->arch.last_retry_addr = cr2;
5075
5076 if (!vcpu->arch.mmu.direct_map)
5077 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
5078
5079 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5080
5081 return true;
5082 }
5083
5084 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
5085 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
5086
5087 static void kvm_smm_changed(struct kvm_vcpu *vcpu)
5088 {
5089 if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
5090 /* This is a good place to trace that we are exiting SMM. */
5091 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
5092
5093 if (unlikely(vcpu->arch.smi_pending)) {
5094 kvm_make_request(KVM_REQ_SMI, vcpu);
5095 vcpu->arch.smi_pending = 0;
5096 } else {
5097 /* Process a latched INIT, if any. */
5098 kvm_make_request(KVM_REQ_EVENT, vcpu);
5099 }
5100 }
5101
5102 kvm_mmu_reset_context(vcpu);
5103 }
5104
5105 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
5106 {
5107 unsigned changed = vcpu->arch.hflags ^ emul_flags;
5108
5109 vcpu->arch.hflags = emul_flags;
5110
5111 if (changed & HF_SMM_MASK)
5112 kvm_smm_changed(vcpu);
5113 }
5114
5115 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
5116 unsigned long *db)
5117 {
5118 u32 dr6 = 0;
5119 int i;
5120 u32 enable, rwlen;
5121
5122 enable = dr7;
5123 rwlen = dr7 >> 16;
5124 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
5125 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
5126 dr6 |= (1 << i);
5127 return dr6;
5128 }
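
/*
 * DR7 layout relied on above: bits 0-7 hold the L/G enable pair for
 * each of the four breakpoints, and bits 16-31 hold a 4-bit R/W+LEN
 * field per breakpoint. For example, at slot 2 the loop has shifted
 * "enable" right by 4 and "rwlen" right by 8, so (enable & 3) tests
 * L2/G2 and (rwlen & 15) == type matches both the access type and the
 * length field.
 */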
5129
5130 static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
5131 {
5132 struct kvm_run *kvm_run = vcpu->run;
5133
5134 /*
5135 * rflags is the old, "raw" value of the flags. The new value has
5136 * not been saved yet.
5137 *
5138 * This is correct even for TF set by the guest, because "the
5139 * processor will not generate this exception after the instruction
5140 * that sets the TF flag".
5141 */
5142 if (unlikely(rflags & X86_EFLAGS_TF)) {
5143 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
5144 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
5145 DR6_RTM;
5146 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
5147 kvm_run->debug.arch.exception = DB_VECTOR;
5148 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5149 *r = EMULATE_USER_EXIT;
5150 } else {
5151 vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
5152 /*
5153 * "Certain debug exceptions may clear bit 0-3. The
5154 * remaining contents of the DR6 register are never
5155 * cleared by the processor".
5156 */
5157 vcpu->arch.dr6 &= ~15;
5158 vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
5159 kvm_queue_exception(vcpu, DB_VECTOR);
5160 }
5161 }
5162 }
5163
5164 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
5165 {
5166 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
5167 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
5168 struct kvm_run *kvm_run = vcpu->run;
5169 unsigned long eip = kvm_get_linear_rip(vcpu);
5170 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5171 vcpu->arch.guest_debug_dr7,
5172 vcpu->arch.eff_db);
5173
5174 if (dr6 != 0) {
5175 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
5176 kvm_run->debug.arch.pc = eip;
5177 kvm_run->debug.arch.exception = DB_VECTOR;
5178 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5179 *r = EMULATE_USER_EXIT;
5180 return true;
5181 }
5182 }
5183
5184 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
5185 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
5186 unsigned long eip = kvm_get_linear_rip(vcpu);
5187 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5188 vcpu->arch.dr7,
5189 vcpu->arch.db);
5190
5191 if (dr6 != 0) {
5192 vcpu->arch.dr6 &= ~15;
5193 vcpu->arch.dr6 |= dr6 | DR6_RTM;
5194 kvm_queue_exception(vcpu, DB_VECTOR);
5195 *r = EMULATE_DONE;
5196 return true;
5197 }
5198 }
5199
5200 return false;
5201 }
5202
5203 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
5204 unsigned long cr2,
5205 int emulation_type,
5206 void *insn,
5207 int insn_len)
5208 {
5209 int r;
5210 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5211 bool writeback = true;
5212 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
5213
5214 /*
5215 * Clear write_fault_to_shadow_pgtable here to ensure it is
5216 * never reused.
5217 */
5218 vcpu->arch.write_fault_to_shadow_pgtable = false;
5219 kvm_clear_exception_queue(vcpu);
5220
5221 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
5222 init_emulate_ctxt(vcpu);
5223
5224 /*
5225 * We will reenter on the same instruction since
5226 * we do not set complete_userspace_io. This does not
5227 * handle watchpoints yet; those would be handled in
5228 * the emulate_ops.
5229 */
5230 if (kvm_vcpu_check_breakpoint(vcpu, &r))
5231 return r;
5232
5233 ctxt->interruptibility = 0;
5234 ctxt->have_exception = false;
5235 ctxt->exception.vector = -1;
5236 ctxt->perm_ok = false;
5237
5238 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
5239
5240 r = x86_decode_insn(ctxt, insn, insn_len);
5241
5242 trace_kvm_emulate_insn_start(vcpu);
5243 ++vcpu->stat.insn_emulation;
5244 if (r != EMULATION_OK) {
5245 if (emulation_type & EMULTYPE_TRAP_UD)
5246 return EMULATE_FAIL;
5247 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
5248 emulation_type))
5249 return EMULATE_DONE;
5250 if (emulation_type & EMULTYPE_SKIP)
5251 return EMULATE_FAIL;
5252 return handle_emulation_failure(vcpu);
5253 }
5254 }
5255
5256 if (emulation_type & EMULTYPE_SKIP) {
5257 kvm_rip_write(vcpu, ctxt->_eip);
5258 if (ctxt->eflags & X86_EFLAGS_RF)
5259 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
5260 return EMULATE_DONE;
5261 }
5262
5263 if (retry_instruction(ctxt, cr2, emulation_type))
5264 return EMULATE_DONE;
5265
5266 /* This is needed for the VMware backdoor interface to work, since it
5267 changes register values during the I/O operation */
5268 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
5269 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
5270 emulator_invalidate_register_cache(ctxt);
5271 }
5272
5273 restart:
5274 r = x86_emulate_insn(ctxt);
5275
5276 if (r == EMULATION_INTERCEPTED)
5277 return EMULATE_DONE;
5278
5279 if (r == EMULATION_FAILED) {
5280 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
5281 emulation_type))
5282 return EMULATE_DONE;
5283
5284 return handle_emulation_failure(vcpu);
5285 }
5286
5287 if (ctxt->have_exception) {
5288 r = EMULATE_DONE;
5289 if (inject_emulated_exception(vcpu))
5290 return r;
5291 } else if (vcpu->arch.pio.count) {
5292 if (!vcpu->arch.pio.in) {
5293 /* FIXME: return into emulator if single-stepping. */
5294 vcpu->arch.pio.count = 0;
5295 } else {
5296 writeback = false;
5297 vcpu->arch.complete_userspace_io = complete_emulated_pio;
5298 }
5299 r = EMULATE_USER_EXIT;
5300 } else if (vcpu->mmio_needed) {
5301 if (!vcpu->mmio_is_write)
5302 writeback = false;
5303 r = EMULATE_USER_EXIT;
5304 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
5305 } else if (r == EMULATION_RESTART)
5306 goto restart;
5307 else
5308 r = EMULATE_DONE;
5309
5310 if (writeback) {
5311 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
5312 toggle_interruptibility(vcpu, ctxt->interruptibility);
5313 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5314 if (vcpu->arch.hflags != ctxt->emul_flags)
5315 kvm_set_hflags(vcpu, ctxt->emul_flags);
5316 kvm_rip_write(vcpu, ctxt->eip);
5317 if (r == EMULATE_DONE)
5318 kvm_vcpu_check_singlestep(vcpu, rflags, &r);
5319 if (!ctxt->have_exception ||
5320 exception_type(ctxt->exception.vector) == EXCPT_TRAP)
5321 __kvm_set_rflags(vcpu, ctxt->eflags);
5322
5323 /*
5324 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
5325 * do nothing, and it will be requested again as soon as
5326 * the shadow expires. But we still need to check here,
5327 * because POPF has no interrupt shadow.
5328 */
5329 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
5330 kvm_make_request(KVM_REQ_EVENT, vcpu);
5331 } else
5332 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
5333
5334 return r;
5335 }
5336 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
5337
5338 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
5339 {
5340 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
5341 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
5342 size, port, &val, 1);
5343 /* do not return to emulator after return from userspace */
5344 vcpu->arch.pio.count = 0;
5345 return ret;
5346 }
5347 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
5348
5349 static void tsc_bad(void *info)
5350 {
5351 __this_cpu_write(cpu_tsc_khz, 0);
5352 }
5353
5354 static void tsc_khz_changed(void *data)
5355 {
5356 struct cpufreq_freqs *freq = data;
5357 unsigned long khz = 0;
5358
5359 if (data)
5360 khz = freq->new;
5361 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5362 khz = cpufreq_quick_get(raw_smp_processor_id());
5363 if (!khz)
5364 khz = tsc_khz;
5365 __this_cpu_write(cpu_tsc_khz, khz);
5366 }
5367
5368 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
5369 void *data)
5370 {
5371 struct cpufreq_freqs *freq = data;
5372 struct kvm *kvm;
5373 struct kvm_vcpu *vcpu;
5374 int i, send_ipi = 0;
5375
5376 /*
5377 * We allow guests to temporarily run on slowing clocks,
5378 * provided we notify them after, or to run on accelerating
5379 * clocks, provided we notify them before. Thus time never
5380 * goes backwards.
5381 *
5382 * However, we have a problem. We can't atomically update
5383 * the frequency of a given CPU from this function; it is
5384 * merely a notifier, which can be called from any CPU.
5385 * Changing the TSC frequency at arbitrary points in time
5386 * requires a recomputation of local variables related to
5387 * the TSC for each VCPU. We must flag these local variables
5388 * to be updated and be sure the update takes place with the
5389 * new frequency before any guests proceed.
5390 *
5391 * Unfortunately, the combination of hotplug CPU and frequency
5392 * change creates an intractable locking scenario; the order
5393 * of when these callouts happen is undefined with respect to
5394 * CPU hotplug, and they can race with each other. As such,
5395 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
5396 * undefined; you can actually have a CPU frequency change take
5397 * place in between the computation of X and the setting of the
5398 * variable. To protect against this problem, all updates of
5399 * the per_cpu tsc_khz variable are done in an interrupt
5400 * protected IPI, and all callers wishing to update the value
5401 * must wait for a synchronous IPI to complete (which is trivial
5402 * if the caller is on the CPU already). This establishes the
5403 * necessary total order on variable updates.
5404 *
5405 * Note that because a guest time update may take place
5406 * anytime after the setting of the VCPU's request bit, the
5407 * correct TSC value must be set before the request. However,
5408 * to ensure the update actually makes it to any guest which
5409 * starts running in hardware virtualization between the set
5410 * and the acquisition of the spinlock, we must also ping the
5411 * CPU after setting the request bit.
5412 *
5413 */
5414
5415 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
5416 return 0;
5417 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
5418 return 0;
5419
5420 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5421
5422 spin_lock(&kvm_lock);
5423 list_for_each_entry(kvm, &vm_list, vm_list) {
5424 kvm_for_each_vcpu(i, vcpu, kvm) {
5425 if (vcpu->cpu != freq->cpu)
5426 continue;
5427 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5428 if (vcpu->cpu != smp_processor_id())
5429 send_ipi = 1;
5430 }
5431 }
5432 spin_unlock(&kvm_lock);
5433
5434 if (freq->old < freq->new && send_ipi) {
5435 /*
5436 * We are scaling the frequency up. We must make sure the guest
5437 * doesn't see old kvmclock values while running with
5438 * the new frequency; otherwise we risk the guest seeing
5439 * time go backwards.
5440 *
5441 * In case we update the frequency for another cpu
5442 * (which might be in guest context) send an interrupt
5443 * to kick the cpu out of guest context. Next time
5444 * guest context is entered kvmclock will be updated,
5445 * so the guest will not see stale values.
5446 */
5447 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5448 }
5449 return 0;
5450 }
5451
5452 static struct notifier_block kvmclock_cpufreq_notifier_block = {
5453 .notifier_call = kvmclock_cpufreq_notifier
5454 };
5455
5456 static int kvmclock_cpu_notifier(struct notifier_block *nfb,
5457 unsigned long action, void *hcpu)
5458 {
5459 unsigned int cpu = (unsigned long)hcpu;
5460
5461 switch (action) {
5462 case CPU_ONLINE:
5463 case CPU_DOWN_FAILED:
5464 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
5465 break;
5466 case CPU_DOWN_PREPARE:
5467 smp_call_function_single(cpu, tsc_bad, NULL, 1);
5468 break;
5469 }
5470 return NOTIFY_OK;
5471 }
5472
5473 static struct notifier_block kvmclock_cpu_notifier_block = {
5474 .notifier_call = kvmclock_cpu_notifier,
5475 .priority = -INT_MAX
5476 };
5477
5478 static void kvm_timer_init(void)
5479 {
5480 int cpu;
5481
5482 max_tsc_khz = tsc_khz;
5483
5484 cpu_notifier_register_begin();
5485 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5486 #ifdef CONFIG_CPU_FREQ
5487 struct cpufreq_policy policy;
5488 memset(&policy, 0, sizeof(policy));
5489 cpu = get_cpu();
5490 cpufreq_get_policy(&policy, cpu);
5491 if (policy.cpuinfo.max_freq)
5492 max_tsc_khz = policy.cpuinfo.max_freq;
5493 put_cpu();
5494 #endif
5495 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
5496 CPUFREQ_TRANSITION_NOTIFIER);
5497 }
5498 pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
5499 for_each_online_cpu(cpu)
5500 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
5501
5502 __register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
5503 cpu_notifier_register_done();
5504
5505 }
5506
5507 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
5508
5509 int kvm_is_in_guest(void)
5510 {
5511 return __this_cpu_read(current_vcpu) != NULL;
5512 }
5513
5514 static int kvm_is_user_mode(void)
5515 {
5516 int user_mode = 3;
5517
5518 if (__this_cpu_read(current_vcpu))
5519 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
5520
5521 return user_mode != 0;
5522 }
5523
5524 static unsigned long kvm_get_guest_ip(void)
5525 {
5526 unsigned long ip = 0;
5527
5528 if (__this_cpu_read(current_vcpu))
5529 ip = kvm_rip_read(__this_cpu_read(current_vcpu));
5530
5531 return ip;
5532 }
5533
5534 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5535 .is_in_guest = kvm_is_in_guest,
5536 .is_user_mode = kvm_is_user_mode,
5537 .get_guest_ip = kvm_get_guest_ip,
5538 };
5539
5540 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
5541 {
5542 __this_cpu_write(current_vcpu, vcpu);
5543 }
5544 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
5545
5546 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
5547 {
5548 __this_cpu_write(current_vcpu, NULL);
5549 }
5550 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
5551
5552 static void kvm_set_mmio_spte_mask(void)
5553 {
5554 u64 mask;
5555 int maxphyaddr = boot_cpu_data.x86_phys_bits;
5556
5557 /*
5558 * Set the reserved bits and the present bit of a paging-structure
5559 * entry to generate a page fault with PFERR.RSVD = 1.
5560 */
5561 /* Mask the reserved physical address bits. */
5562 mask = rsvd_bits(maxphyaddr, 51);
5563
5564 /* Bit 62 is always reserved on 32-bit hosts. */
5565 mask |= 0x3ull << 62;
5566
5567 /* Set the present bit. */
5568 mask |= 1ull;
5569
5570 #ifdef CONFIG_X86_64
5571 /*
5572 * If reserved bits are not available (MAXPHYADDR == 52), clear the
5573 * present bit to disable the MMIO page fault.
5574 */
5575 if (maxphyaddr == 52)
5576 mask &= ~1ull;
5577 #endif
5578
5579 kvm_mmu_set_mmio_spte_mask(mask);
5580 }
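
/*
 * Worked example: on a host with boot_cpu_data.x86_phys_bits == 40 this
 * builds mask = rsvd_bits(40, 51) | (0x3ull << 62) | 1, i.e. physical
 * address bits 40-51, the two top bits, and the present bit, so an SPTE
 * carrying this pattern faults with a reserved-bit violation that the
 * MMU can recognize as a cached MMIO access.
 */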
5581
5582 #ifdef CONFIG_X86_64
5583 static void pvclock_gtod_update_fn(struct work_struct *work)
5584 {
5585 struct kvm *kvm;
5586
5587 struct kvm_vcpu *vcpu;
5588 int i;
5589
5590 spin_lock(&kvm_lock);
5591 list_for_each_entry(kvm, &vm_list, vm_list)
5592 kvm_for_each_vcpu(i, vcpu, kvm)
5593 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
5594 atomic_set(&kvm_guest_has_master_clock, 0);
5595 spin_unlock(&kvm_lock);
5596 }
5597
5598 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
5599
5600 /*
5601 * Notification about pvclock gtod data update.
5602 */
5603 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
5604 void *priv)
5605 {
5606 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
5607 struct timekeeper *tk = priv;
5608
5609 update_pvclock_gtod(tk);
5610
5611 /* disable master clock if host does not trust, or does not
5612 * use, TSC clocksource
5613 */
5614 if (gtod->clock.vclock_mode != VCLOCK_TSC &&
5615 atomic_read(&kvm_guest_has_master_clock) != 0)
5616 queue_work(system_long_wq, &pvclock_gtod_work);
5617
5618 return 0;
5619 }
5620
5621 static struct notifier_block pvclock_gtod_notifier = {
5622 .notifier_call = pvclock_gtod_notify,
5623 };
5624 #endif
5625
5626 int kvm_arch_init(void *opaque)
5627 {
5628 int r;
5629 struct kvm_x86_ops *ops = opaque;
5630
5631 if (kvm_x86_ops) {
5632 printk(KERN_ERR "kvm: already loaded the other module\n");
5633 r = -EEXIST;
5634 goto out;
5635 }
5636
5637 if (!ops->cpu_has_kvm_support()) {
5638 printk(KERN_ERR "kvm: no hardware support\n");
5639 r = -EOPNOTSUPP;
5640 goto out;
5641 }
5642 if (ops->disabled_by_bios()) {
5643 printk(KERN_ERR "kvm: disabled by bios\n");
5644 r = -EOPNOTSUPP;
5645 goto out;
5646 }
5647
5648 r = -ENOMEM;
5649 shared_msrs = alloc_percpu(struct kvm_shared_msrs);
5650 if (!shared_msrs) {
5651 printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
5652 goto out;
5653 }
5654
5655 r = kvm_mmu_module_init();
5656 if (r)
5657 goto out_free_percpu;
5658
5659 kvm_set_mmio_spte_mask();
5660
5661 kvm_x86_ops = ops;
5662
5663 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
5664 PT_DIRTY_MASK, PT64_NX_MASK, 0);
5665
5666 kvm_timer_init();
5667
5668 perf_register_guest_info_callbacks(&kvm_guest_cbs);
5669
5670 if (cpu_has_xsave)
5671 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
5672
5673 kvm_lapic_init();
5674 #ifdef CONFIG_X86_64
5675 pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
5676 #endif
5677
5678 return 0;
5679
5680 out_free_percpu:
5681 free_percpu(shared_msrs);
5682 out:
5683 return r;
5684 }
5685
5686 void kvm_arch_exit(void)
5687 {
5688 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
5689
5690 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5691 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
5692 CPUFREQ_TRANSITION_NOTIFIER);
5693 unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
5694 #ifdef CONFIG_X86_64
5695 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
5696 #endif
5697 kvm_x86_ops = NULL;
5698 kvm_mmu_module_exit();
5699 free_percpu(shared_msrs);
5700 }
5701
5702 int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
5703 {
5704 ++vcpu->stat.halt_exits;
5705 if (lapic_in_kernel(vcpu)) {
5706 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
5707 return 1;
5708 } else {
5709 vcpu->run->exit_reason = KVM_EXIT_HLT;
5710 return 0;
5711 }
5712 }
5713 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
5714
5715 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
5716 {
5717 kvm_x86_ops->skip_emulated_instruction(vcpu);
5718 return kvm_vcpu_halt(vcpu);
5719 }
5720 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
5721
5722 /*
5723 * kvm_pv_kick_cpu_op: Kick a vcpu.
5724 *
5725 * @apicid - apicid of vcpu to be kicked.
5726 */
5727 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
5728 {
5729 struct kvm_lapic_irq lapic_irq;
5730
5731 lapic_irq.shorthand = 0;
5732 lapic_irq.dest_mode = 0;
5733 lapic_irq.dest_id = apicid;
5734 lapic_irq.msi_redir_hint = false;
5735
5736 lapic_irq.delivery_mode = APIC_DM_REMRD;
5737 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
5738 }
5739
5740 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
5741 {
5742 unsigned long nr, a0, a1, a2, a3, ret;
5743 int op_64_bit, r = 1;
5744
5745 kvm_x86_ops->skip_emulated_instruction(vcpu);
5746
5747 if (kvm_hv_hypercall_enabled(vcpu->kvm))
5748 return kvm_hv_hypercall(vcpu);
5749
5750 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
5751 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
5752 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
5753 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
5754 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
5755
5756 trace_kvm_hypercall(nr, a0, a1, a2, a3);
5757
5758 op_64_bit = is_64_bit_mode(vcpu);
5759 if (!op_64_bit) {
5760 nr &= 0xFFFFFFFF;
5761 a0 &= 0xFFFFFFFF;
5762 a1 &= 0xFFFFFFFF;
5763 a2 &= 0xFFFFFFFF;
5764 a3 &= 0xFFFFFFFF;
5765 }
5766
5767 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
5768 ret = -KVM_EPERM;
5769 goto out;
5770 }
5771
5772 switch (nr) {
5773 case KVM_HC_VAPIC_POLL_IRQ:
5774 ret = 0;
5775 break;
5776 case KVM_HC_KICK_CPU:
5777 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
5778 ret = 0;
5779 break;
5780 default:
5781 ret = -KVM_ENOSYS;
5782 break;
5783 }
5784 out:
5785 if (!op_64_bit)
5786 ret = (u32)ret;
5787 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5788 ++vcpu->stat.hypercalls;
5789 return r;
5790 }
5791 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
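
/*
 * Guest-side view of the ABI handled above (a minimal sketch, not part
 * of this file; the instruction is vmcall or vmmcall depending on the
 * vendor, patched into place via emulator_fix_hypercall() below):
 *
 *   static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
 *   {
 *           long ret;
 *           asm volatile(".byte 0x0f, 0x01, 0xc1"   // vmcall
 *                        : "=a" (ret)
 *                        : "a" (nr), "b" (p1)
 *                        : "memory");
 *           return ret;
 *   }
 *
 * The hypercall number travels in RAX and the arguments in
 * RBX/RCX/RDX/RSI, matching the register reads at the top of
 * kvm_emulate_hypercall().
 */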
5792
5793 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5794 {
5795 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5796 char instruction[3];
5797 unsigned long rip = kvm_rip_read(vcpu);
5798
5799 kvm_x86_ops->patch_hypercall(vcpu, instruction);
5800
5801 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
5802 }
5803
5804 /*
5805 * Check if userspace requested an interrupt window, and that the
5806 * interrupt window is open.
5807 *
5808 * No need to exit to userspace if we already have an interrupt queued.
5809 */
5810 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
5811 {
5812 if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
5813 return false;
5814
5815 if (kvm_cpu_has_interrupt(vcpu))
5816 return false;
5817
5818 return (irqchip_split(vcpu->kvm)
5819 ? kvm_apic_accept_pic_intr(vcpu)
5820 : kvm_arch_interrupt_allowed(vcpu));
5821 }
5822
5823 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5824 {
5825 struct kvm_run *kvm_run = vcpu->run;
5826
5827 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
5828 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
5829 kvm_run->cr8 = kvm_get_cr8(vcpu);
5830 kvm_run->apic_base = kvm_get_apic_base(vcpu);
5831 if (!irqchip_in_kernel(vcpu->kvm))
5832 kvm_run->ready_for_interrupt_injection =
5833 kvm_arch_interrupt_allowed(vcpu) &&
5834 !kvm_cpu_has_interrupt(vcpu) &&
5835 !kvm_event_needs_reinjection(vcpu);
5836 else if (!pic_in_kernel(vcpu->kvm))
5837 kvm_run->ready_for_interrupt_injection =
5838 kvm_apic_accept_pic_intr(vcpu) &&
5839 !kvm_cpu_has_interrupt(vcpu);
5840 else
5841 kvm_run->ready_for_interrupt_injection = 1;
5842 }
5843
5844 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
5845 {
5846 int max_irr, tpr;
5847
5848 if (!kvm_x86_ops->update_cr8_intercept)
5849 return;
5850
5851 if (!vcpu->arch.apic)
5852 return;
5853
5854 if (!vcpu->arch.apic->vapic_addr)
5855 max_irr = kvm_lapic_find_highest_irr(vcpu);
5856 else
5857 max_irr = -1;
5858
5859 if (max_irr != -1)
5860 max_irr >>= 4;
5861
5862 tpr = kvm_lapic_get_cr8(vcpu);
5863
5864 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
5865 }
5866
5867 static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
5868 {
5869 int r;
5870
5871 /* try to reinject previous events if any */
5872 if (vcpu->arch.exception.pending) {
5873 trace_kvm_inj_exception(vcpu->arch.exception.nr,
5874 vcpu->arch.exception.has_error_code,
5875 vcpu->arch.exception.error_code);
5876
5877 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
5878 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
5879 X86_EFLAGS_RF);
5880
5881 if (vcpu->arch.exception.nr == DB_VECTOR &&
5882 (vcpu->arch.dr7 & DR7_GD)) {
5883 vcpu->arch.dr7 &= ~DR7_GD;
5884 kvm_update_dr7(vcpu);
5885 }
5886
5887 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
5888 vcpu->arch.exception.has_error_code,
5889 vcpu->arch.exception.error_code,
5890 vcpu->arch.exception.reinject);
5891 return 0;
5892 }
5893
5894 if (vcpu->arch.nmi_injected) {
5895 kvm_x86_ops->set_nmi(vcpu);
5896 return 0;
5897 }
5898
5899 if (vcpu->arch.interrupt.pending) {
5900 kvm_x86_ops->set_irq(vcpu);
5901 return 0;
5902 }
5903
5904 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
5905 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
5906 if (r != 0)
5907 return r;
5908 }
5909
5910 /* try to inject new event if pending */
5911 if (vcpu->arch.nmi_pending) {
5912 if (kvm_x86_ops->nmi_allowed(vcpu)) {
5913 --vcpu->arch.nmi_pending;
5914 vcpu->arch.nmi_injected = true;
5915 kvm_x86_ops->set_nmi(vcpu);
5916 }
5917 } else if (kvm_cpu_has_injectable_intr(vcpu)) {
5918 /*
5919 * Because interrupts can be injected asynchronously, we are
5920 * calling check_nested_events again here to avoid a race condition.
5921 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
5922 * proposal and current concerns. Perhaps we should be setting
5923 * KVM_REQ_EVENT only on certain events and not unconditionally?
5924 */
5925 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
5926 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
5927 if (r != 0)
5928 return r;
5929 }
5930 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
5931 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
5932 false);
5933 kvm_x86_ops->set_irq(vcpu);
5934 }
5935 }
5936 return 0;
5937 }
5938
5939 static void process_nmi(struct kvm_vcpu *vcpu)
5940 {
5941 unsigned limit = 2;
5942
5943 /*
5944 * x86 is limited to one NMI running, and one NMI pending after it.
5945 * If an NMI is already in progress, limit further NMIs to just one.
5946 * Otherwise, allow two (and we'll inject the first one immediately).
5947 */
5948 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
5949 limit = 1;
5950
5951 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
5952 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
5953 kvm_make_request(KVM_REQ_EVENT, vcpu);
5954 }
5955
5956 #define put_smstate(type, buf, offset, val) \
5957 *(type *)((buf) + (offset) - 0x7e00) = val
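/*
 * buf mirrors the top 512 bytes of the SMRAM state-save area, i.e.
 * offsets 0x7e00..0x7fff relative to SMBASE + 0x8000, hence the
 * subtraction above.  For example,
 *
 *	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
 *
 * stores CR0 at buf[0x1fc], which ends up at SMBASE + 0xfffc once
 * process_smi() copies buf to SMBASE + 0xfe00.
 */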
5958
5959 static u32 process_smi_get_segment_flags(struct kvm_segment *seg)
5960 {
5961 u32 flags = 0;
5962 flags |= seg->g << 23;
5963 flags |= seg->db << 22;
5964 flags |= seg->l << 21;
5965 flags |= seg->avl << 20;
5966 flags |= seg->present << 15;
5967 flags |= seg->dpl << 13;
5968 flags |= seg->s << 12;
5969 flags |= seg->type << 8;
5970 return flags;
5971 }
5972
5973 static void process_smi_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
5974 {
5975 struct kvm_segment seg;
5976 int offset;
5977
5978 kvm_get_segment(vcpu, &seg, n);
5979 put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
5980
5981 if (n < 3)
5982 offset = 0x7f84 + n * 12;
5983 else
5984 offset = 0x7f2c + (n - 3) * 12;
5985
5986 put_smstate(u32, buf, offset + 8, seg.base);
5987 put_smstate(u32, buf, offset + 4, seg.limit);
5988 put_smstate(u32, buf, offset, process_smi_get_segment_flags(&seg));
5989 }
5990
5991 #ifdef CONFIG_X86_64
5992 static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
5993 {
5994 struct kvm_segment seg;
5995 int offset;
5996 u16 flags;
5997
5998 kvm_get_segment(vcpu, &seg, n);
5999 offset = 0x7e00 + n * 16;
6000
6001 flags = process_smi_get_segment_flags(&seg) >> 8;
6002 put_smstate(u16, buf, offset, seg.selector);
6003 put_smstate(u16, buf, offset + 2, flags);
6004 put_smstate(u32, buf, offset + 4, seg.limit);
6005 put_smstate(u64, buf, offset + 8, seg.base);
6006 }
6007 #endif
6008
6009 static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf)
6010 {
6011 struct desc_ptr dt;
6012 struct kvm_segment seg;
6013 unsigned long val;
6014 int i;
6015
6016 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
6017 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
6018 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
6019 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
6020
6021 for (i = 0; i < 8; i++)
6022 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
6023
6024 kvm_get_dr(vcpu, 6, &val);
6025 put_smstate(u32, buf, 0x7fcc, (u32)val);
6026 kvm_get_dr(vcpu, 7, &val);
6027 put_smstate(u32, buf, 0x7fc8, (u32)val);
6028
6029 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
6030 put_smstate(u32, buf, 0x7fc4, seg.selector);
6031 put_smstate(u32, buf, 0x7f64, seg.base);
6032 put_smstate(u32, buf, 0x7f60, seg.limit);
6033 put_smstate(u32, buf, 0x7f5c, process_smi_get_segment_flags(&seg));
6034
6035 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
6036 put_smstate(u32, buf, 0x7fc0, seg.selector);
6037 put_smstate(u32, buf, 0x7f80, seg.base);
6038 put_smstate(u32, buf, 0x7f7c, seg.limit);
6039 put_smstate(u32, buf, 0x7f78, process_smi_get_segment_flags(&seg));
6040
6041 kvm_x86_ops->get_gdt(vcpu, &dt);
6042 put_smstate(u32, buf, 0x7f74, dt.address);
6043 put_smstate(u32, buf, 0x7f70, dt.size);
6044
6045 kvm_x86_ops->get_idt(vcpu, &dt);
6046 put_smstate(u32, buf, 0x7f58, dt.address);
6047 put_smstate(u32, buf, 0x7f54, dt.size);
6048
6049 for (i = 0; i < 6; i++)
6050 process_smi_save_seg_32(vcpu, buf, i);
6051
6052 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
6053
6054 /* revision id */
6055 put_smstate(u32, buf, 0x7efc, 0x00020000);
6056 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
6057 }
6058
6059 static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
6060 {
6061 #ifdef CONFIG_X86_64
6062 struct desc_ptr dt;
6063 struct kvm_segment seg;
6064 unsigned long val;
6065 int i;
6066
6067 for (i = 0; i < 16; i++)
6068 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
6069
6070 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
6071 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
6072
6073 kvm_get_dr(vcpu, 6, &val);
6074 put_smstate(u64, buf, 0x7f68, val);
6075 kvm_get_dr(vcpu, 7, &val);
6076 put_smstate(u64, buf, 0x7f60, val);
6077
6078 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
6079 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
6080 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
6081
6082 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
6083
6084 /* revision id */
6085 put_smstate(u32, buf, 0x7efc, 0x00020064);
6086
6087 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
6088
6089 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
6090 put_smstate(u16, buf, 0x7e90, seg.selector);
6091 put_smstate(u16, buf, 0x7e92, process_smi_get_segment_flags(&seg) >> 8);
6092 put_smstate(u32, buf, 0x7e94, seg.limit);
6093 put_smstate(u64, buf, 0x7e98, seg.base);
6094
6095 kvm_x86_ops->get_idt(vcpu, &dt);
6096 put_smstate(u32, buf, 0x7e84, dt.size);
6097 put_smstate(u64, buf, 0x7e88, dt.address);
6098
6099 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
6100 put_smstate(u16, buf, 0x7e70, seg.selector);
6101 put_smstate(u16, buf, 0x7e72, process_smi_get_segment_flags(&seg) >> 8);
6102 put_smstate(u32, buf, 0x7e74, seg.limit);
6103 put_smstate(u64, buf, 0x7e78, seg.base);
6104
6105 kvm_x86_ops->get_gdt(vcpu, &dt);
6106 put_smstate(u32, buf, 0x7e64, dt.size);
6107 put_smstate(u64, buf, 0x7e68, dt.address);
6108
6109 for (i = 0; i < 6; i++)
6110 process_smi_save_seg_64(vcpu, buf, i);
6111 #else
6112 WARN_ON_ONCE(1);
6113 #endif
6114 }
6115
6116 static void process_smi(struct kvm_vcpu *vcpu)
6117 {
6118 struct kvm_segment cs, ds;
6119 struct desc_ptr dt;
6120 char buf[512];
6121 u32 cr0;
6122
6123 if (is_smm(vcpu)) {
6124 vcpu->arch.smi_pending = true;
6125 return;
6126 }
6127
6128 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
6129 vcpu->arch.hflags |= HF_SMM_MASK;
6130 memset(buf, 0, 512);
6131 if (guest_cpuid_has_longmode(vcpu))
6132 process_smi_save_state_64(vcpu, buf);
6133 else
6134 process_smi_save_state_32(vcpu, buf);
6135
6136 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
6137
6138 if (kvm_x86_ops->get_nmi_mask(vcpu))
6139 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
6140 else
6141 kvm_x86_ops->set_nmi_mask(vcpu, true);
6142
6143 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
6144 kvm_rip_write(vcpu, 0x8000);
6145
6146 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
6147 kvm_x86_ops->set_cr0(vcpu, cr0);
6148 vcpu->arch.cr0 = cr0;
6149
6150 kvm_x86_ops->set_cr4(vcpu, 0);
6151
6152 /* Undocumented: IDT limit is set to zero on entry to SMM. */
6153 dt.address = dt.size = 0;
6154 kvm_x86_ops->set_idt(vcpu, &dt);
6155
6156 __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
6157
6158 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
6159 cs.base = vcpu->arch.smbase;
6160
6161 ds.selector = 0;
6162 ds.base = 0;
6163
6164 cs.limit = ds.limit = 0xffffffff;
6165 cs.type = ds.type = 0x3;
6166 cs.dpl = ds.dpl = 0;
6167 cs.db = ds.db = 0;
6168 cs.s = ds.s = 1;
6169 cs.l = ds.l = 0;
6170 cs.g = ds.g = 1;
6171 cs.avl = ds.avl = 0;
6172 cs.present = ds.present = 1;
6173 cs.unusable = ds.unusable = 0;
6174 cs.padding = ds.padding = 0;
6175
6176 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
6177 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
6178 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
6179 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
6180 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
6181 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
6182
6183 if (guest_cpuid_has_longmode(vcpu))
6184 kvm_x86_ops->set_efer(vcpu, 0);
6185
6186 kvm_update_cpuid(vcpu);
6187 kvm_mmu_reset_context(vcpu);
6188 }
6189
6190 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
6191 {
6192 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
6193 return;
6194
6195 memset(vcpu->arch.eoi_exit_bitmap, 0, 256 / 8);
6196
6197 if (irqchip_split(vcpu->kvm))
6198 kvm_scan_ioapic_routes(vcpu, vcpu->arch.eoi_exit_bitmap);
6199 else {
6200 kvm_x86_ops->sync_pir_to_irr(vcpu);
6201 kvm_ioapic_scan_entry(vcpu, vcpu->arch.eoi_exit_bitmap);
6202 }
6203 kvm_x86_ops->load_eoi_exitmap(vcpu);
6204 }
6205
6206 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
6207 {
6208 ++vcpu->stat.tlb_flush;
6209 kvm_x86_ops->tlb_flush(vcpu);
6210 }
6211
6212 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
6213 {
6214 struct page *page = NULL;
6215
6216 if (!lapic_in_kernel(vcpu))
6217 return;
6218
6219 if (!kvm_x86_ops->set_apic_access_page_addr)
6220 return;
6221
6222 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
6223 if (is_error_page(page))
6224 return;
6225 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
6226
6227 /*
6228 * Do not pin the apic access page in memory; the MMU notifier
6229 * will call us again if it is migrated or swapped out.
6230 */
6231 put_page(page);
6232 }
6233 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
6234
6235 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
6236 unsigned long address)
6237 {
6238 /*
6239 * The physical address of the apic access page is stored in the VMCS.
6240 * Update it when it becomes invalid.
6241 */
6242 if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
6243 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
6244 }
6245
6246 /*
6247 * Returns 1 to let vcpu_run() continue the guest execution loop without
6248 * exiting to the userspace. Otherwise, the value will be returned to the
6249 * userspace.
6250 */
6251 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6252 {
6253 int r;
6254 bool req_int_win = !lapic_in_kernel(vcpu) &&
6255 vcpu->run->request_interrupt_window;
6256 bool req_immediate_exit = false;
6257
6258 if (vcpu->requests) {
6259 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
6260 kvm_mmu_unload(vcpu);
6261 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
6262 __kvm_migrate_timers(vcpu);
6263 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
6264 kvm_gen_update_masterclock(vcpu->kvm);
6265 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
6266 kvm_gen_kvmclock_update(vcpu);
6267 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
6268 r = kvm_guest_time_update(vcpu);
6269 if (unlikely(r))
6270 goto out;
6271 }
6272 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
6273 kvm_mmu_sync_roots(vcpu);
6274 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
6275 kvm_vcpu_flush_tlb(vcpu);
6276 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
6277 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
6278 r = 0;
6279 goto out;
6280 }
6281 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
6282 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
6283 r = 0;
6284 goto out;
6285 }
6286 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
6287 vcpu->fpu_active = 0;
6288 kvm_x86_ops->fpu_deactivate(vcpu);
6289 }
6290 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
6291 /* Page is swapped out. Do synthetic halt */
6292 vcpu->arch.apf.halted = true;
6293 r = 1;
6294 goto out;
6295 }
6296 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
6297 record_steal_time(vcpu);
6298 if (kvm_check_request(KVM_REQ_SMI, vcpu))
6299 process_smi(vcpu);
6300 if (kvm_check_request(KVM_REQ_NMI, vcpu))
6301 process_nmi(vcpu);
6302 if (kvm_check_request(KVM_REQ_PMU, vcpu))
6303 kvm_pmu_handle_event(vcpu);
6304 if (kvm_check_request(KVM_REQ_PMI, vcpu))
6305 kvm_pmu_deliver_pmi(vcpu);
6306 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
6307 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
6308 if (test_bit(vcpu->arch.pending_ioapic_eoi,
6309 (void *) vcpu->arch.eoi_exit_bitmap)) {
6310 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
6311 vcpu->run->eoi.vector =
6312 vcpu->arch.pending_ioapic_eoi;
6313 r = 0;
6314 goto out;
6315 }
6316 }
6317 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
6318 vcpu_scan_ioapic(vcpu);
6319 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
6320 kvm_vcpu_reload_apic_access_page(vcpu);
6321 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
6322 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
6323 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
6324 r = 0;
6325 goto out;
6326 }
6327 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
6328 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
6329 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
6330 r = 0;
6331 goto out;
6332 }
6333 }
6334
6335 /*
6336 * KVM_REQ_EVENT is not set when posted interrupts are set by
6337 * VT-d hardware, so we have to update RVI unconditionally.
6338 */
6339 if (kvm_lapic_enabled(vcpu)) {
6340 /*
6341 * Update architecture specific hints for APIC
6342 * virtual interrupt delivery.
6343 */
6344 if (kvm_x86_ops->hwapic_irr_update)
6345 kvm_x86_ops->hwapic_irr_update(vcpu,
6346 kvm_lapic_find_highest_irr(vcpu));
6347 }
6348
6349 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
6350 kvm_apic_accept_events(vcpu);
6351 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
6352 r = 1;
6353 goto out;
6354 }
6355
6356 if (inject_pending_event(vcpu, req_int_win) != 0)
6357 req_immediate_exit = true;
6358 /* enable NMI/IRQ window open exits if needed */
6359 else if (vcpu->arch.nmi_pending)
6360 kvm_x86_ops->enable_nmi_window(vcpu);
6361 else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
6362 kvm_x86_ops->enable_irq_window(vcpu);
6363
6364 if (kvm_lapic_enabled(vcpu)) {
6365 update_cr8_intercept(vcpu);
6366 kvm_lapic_sync_to_vapic(vcpu);
6367 }
6368 }
6369
6370 r = kvm_mmu_reload(vcpu);
6371 if (unlikely(r))
6372 goto cancel_injection;
6373
6374
6375 preempt_disable();
6376
6377 kvm_x86_ops->prepare_guest_switch(vcpu);
6378 if (vcpu->fpu_active)
6379 kvm_load_guest_fpu(vcpu);
6380 kvm_load_guest_xcr0(vcpu);
6381
6382 vcpu->mode = IN_GUEST_MODE;
6383
6384 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
6385
6386 /* We should set ->mode before checking ->requests;
6387 * see the comment in make_all_cpus_request.
6388 */
6389 smp_mb__after_srcu_read_unlock();
6390
6391 local_irq_disable();
6392
6393 if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
6394 || need_resched() || signal_pending(current)) {
6395 vcpu->mode = OUTSIDE_GUEST_MODE;
6396 smp_wmb();
6397 local_irq_enable();
6398 preempt_enable();
6399 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6400 r = 1;
6401 goto cancel_injection;
6402 }
6403
6404 if (req_immediate_exit)
6405 smp_send_reschedule(vcpu->cpu);
6406
6407 __kvm_guest_enter();
6408
6409 if (unlikely(vcpu->arch.switch_db_regs)) {
6410 set_debugreg(0, 7);
6411 set_debugreg(vcpu->arch.eff_db[0], 0);
6412 set_debugreg(vcpu->arch.eff_db[1], 1);
6413 set_debugreg(vcpu->arch.eff_db[2], 2);
6414 set_debugreg(vcpu->arch.eff_db[3], 3);
6415 set_debugreg(vcpu->arch.dr6, 6);
6416 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
6417 }
6418
6419 trace_kvm_entry(vcpu->vcpu_id);
6420 wait_lapic_expire(vcpu);
6421 kvm_x86_ops->run(vcpu);
6422
6423 /*
6424 * Do this here before restoring debug registers on the host. And
6425 * since we do this before handling the vmexit, a DR access vmexit
6426 * can (a) read the correct value of the debug registers, (b) set
6427 * KVM_DEBUGREG_WONT_EXIT again.
6428 */
6429 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
6430 int i;
6431
6432 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
6433 kvm_x86_ops->sync_dirty_debug_regs(vcpu);
6434 for (i = 0; i < KVM_NR_DB_REGS; i++)
6435 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
6436 }
6437
6438 /*
6439 * If the guest has used debug registers, at least dr7
6440 * will be disabled while returning to the host.
6441 * If we don't have active breakpoints in the host, we don't
6442 * care about the messed up debug address registers. But if
6443 * we have some of them active, restore the old state.
6444 */
6445 if (hw_breakpoint_active())
6446 hw_breakpoint_restore();
6447
6448 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
6449 rdtsc());
6450
6451 vcpu->mode = OUTSIDE_GUEST_MODE;
6452 smp_wmb();
6453
6454 /* Interrupts are enabled by handle_external_intr() */
6455 kvm_x86_ops->handle_external_intr(vcpu);
6456
6457 ++vcpu->stat.exits;
6458
6459 /*
6460 * We must have an instruction between local_irq_enable() and
6461 * kvm_guest_exit(), so the timer interrupt isn't delayed by
6462 * the interrupt shadow. The stat.exits increment will do nicely.
6463 * But we need to prevent reordering, hence this barrier():
6464 */
6465 barrier();
6466
6467 kvm_guest_exit();
6468
6469 preempt_enable();
6470
6471 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6472
6473 /*
6474 * Profile KVM exit RIPs:
6475 */
6476 if (unlikely(prof_on == KVM_PROFILING)) {
6477 unsigned long rip = kvm_rip_read(vcpu);
6478 profile_hit(KVM_PROFILING, (void *)rip);
6479 }
6480
6481 if (unlikely(vcpu->arch.tsc_always_catchup))
6482 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
6483
6484 if (vcpu->arch.apic_attention)
6485 kvm_lapic_sync_from_vapic(vcpu);
6486
6487 r = kvm_x86_ops->handle_exit(vcpu);
6488 return r;
6489
6490 cancel_injection:
6491 kvm_x86_ops->cancel_injection(vcpu);
6492 if (unlikely(vcpu->arch.apic_attention))
6493 kvm_lapic_sync_from_vapic(vcpu);
6494 out:
6495 return r;
6496 }
6497
6498 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
6499 {
6500 if (!kvm_arch_vcpu_runnable(vcpu) &&
6501 (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
6502 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6503 kvm_vcpu_block(vcpu);
6504 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6505
6506 if (kvm_x86_ops->post_block)
6507 kvm_x86_ops->post_block(vcpu);
6508
6509 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
6510 return 1;
6511 }
6512
6513 kvm_apic_accept_events(vcpu);
6514 switch(vcpu->arch.mp_state) {
6515 case KVM_MP_STATE_HALTED:
6516 vcpu->arch.pv.pv_unhalted = false;
6517 vcpu->arch.mp_state =
6518 KVM_MP_STATE_RUNNABLE;
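/* fall through */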
6519 case KVM_MP_STATE_RUNNABLE:
6520 vcpu->arch.apf.halted = false;
6521 break;
6522 case KVM_MP_STATE_INIT_RECEIVED:
6523 break;
6524 default:
6525 return -EINTR;
6526
6527 }
6528 return 1;
6529 }
6530
6531 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
6532 {
6533 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
6534 !vcpu->arch.apf.halted);
6535 }
6536
6537 static int vcpu_run(struct kvm_vcpu *vcpu)
6538 {
6539 int r;
6540 struct kvm *kvm = vcpu->kvm;
6541
6542 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6543
6544 for (;;) {
6545 if (kvm_vcpu_running(vcpu)) {
6546 r = vcpu_enter_guest(vcpu);
6547 } else {
6548 r = vcpu_block(kvm, vcpu);
6549 }
6550
6551 if (r <= 0)
6552 break;
6553
6554 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
6555 if (kvm_cpu_has_pending_timer(vcpu))
6556 kvm_inject_pending_timer_irqs(vcpu);
6557
6558 if (dm_request_for_irq_injection(vcpu)) {
6559 r = 0;
6560 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
6561 ++vcpu->stat.request_irq_exits;
6562 break;
6563 }
6564
6565 kvm_check_async_pf_completion(vcpu);
6566
6567 if (signal_pending(current)) {
6568 r = -EINTR;
6569 vcpu->run->exit_reason = KVM_EXIT_INTR;
6570 ++vcpu->stat.signal_exits;
6571 break;
6572 }
6573 if (need_resched()) {
6574 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6575 cond_resched();
6576 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6577 }
6578 }
6579
6580 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6581
6582 return r;
6583 }
6584
6585 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
6586 {
6587 int r;
6588 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6589 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
6590 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
6591 if (r != EMULATE_DONE)
6592 return 0;
6593 return 1;
6594 }
6595
6596 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
6597 {
6598 BUG_ON(!vcpu->arch.pio.count);
6599
6600 return complete_emulated_io(vcpu);
6601 }
6602
6603 /*
6604 * Implements the following, as a state machine:
6605 *
6606 * read:
6607 * for each fragment
6608 * for each mmio piece in the fragment
6609 * write gpa, len
6610 * exit
6611 * copy data
6612 * execute insn
6613 *
6614 * write:
6615 * for each fragment
6616 * for each mmio piece in the fragment
6617 * write gpa, len
6618 * copy data
6619 * exit
6620 */
6621 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
6622 {
6623 struct kvm_run *run = vcpu->run;
6624 struct kvm_mmio_fragment *frag;
6625 unsigned len;
6626
6627 BUG_ON(!vcpu->mmio_needed);
6628
6629 /* Complete previous fragment */
6630 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
6631 len = min(8u, frag->len);
6632 if (!vcpu->mmio_is_write)
6633 memcpy(frag->data, run->mmio.data, len);
6634
6635 if (frag->len <= 8) {
6636 /* Switch to the next fragment. */
6637 frag++;
6638 vcpu->mmio_cur_fragment++;
6639 } else {
6640 /* Go forward to the next mmio piece. */
6641 frag->data += len;
6642 frag->gpa += len;
6643 frag->len -= len;
6644 }
6645
6646 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
6647 vcpu->mmio_needed = 0;
6648
6649 /* FIXME: return into emulator if single-stepping. */
6650 if (vcpu->mmio_is_write)
6651 return 1;
6652 vcpu->mmio_read_completed = 1;
6653 return complete_emulated_io(vcpu);
6654 }
6655
6656 run->exit_reason = KVM_EXIT_MMIO;
6657 run->mmio.phys_addr = frag->gpa;
6658 if (vcpu->mmio_is_write)
6659 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
6660 run->mmio.len = min(8u, frag->len);
6661 run->mmio.is_write = vcpu->mmio_is_write;
6662 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
6663 return 0;
6664 }
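/*
 * The userspace half of the state machine above, as a sketch (run is
 * the mmap'ed kvm_run structure; mmio_read()/mmio_write() are
 * hypothetical device-model callbacks):
 *
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		if (run->exit_reason != KVM_EXIT_MMIO)
 *			break;
 *		if (run->mmio.is_write)
 *			mmio_write(run->mmio.phys_addr,
 *				   run->mmio.data, run->mmio.len);
 *		else
 *			mmio_read(run->mmio.phys_addr,
 *				  run->mmio.data, run->mmio.len);
 *		// re-entering KVM_RUN resumes in complete_emulated_mmio()
 *	}
 */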
6665
6666
6667 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
6668 {
6669 struct fpu *fpu = &current->thread.fpu;
6670 int r;
6671 sigset_t sigsaved;
6672
6673 fpu__activate_curr(fpu);
6674
6675 if (vcpu->sigset_active)
6676 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
6677
6678 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
6679 kvm_vcpu_block(vcpu);
6680 kvm_apic_accept_events(vcpu);
6681 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
6682 r = -EAGAIN;
6683 goto out;
6684 }
6685
6686 /* re-sync apic's tpr */
6687 if (!lapic_in_kernel(vcpu)) {
6688 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
6689 r = -EINVAL;
6690 goto out;
6691 }
6692 }
6693
6694 if (unlikely(vcpu->arch.complete_userspace_io)) {
6695 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
6696 vcpu->arch.complete_userspace_io = NULL;
6697 r = cui(vcpu);
6698 if (r <= 0)
6699 goto out;
6700 } else
6701 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
6702
6703 r = vcpu_run(vcpu);
6704
6705 out:
6706 post_kvm_run_save(vcpu);
6707 if (vcpu->sigset_active)
6708 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
6709
6710 return r;
6711 }
6712
6713 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
6714 {
6715 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
6716 /*
6717 * We are here if userspace calls get_regs() in the middle of
6718 * instruction emulation. Register state needs to be copied
6719 * back from the emulation context to the vcpu. Userspace
6720 * shouldn't usually do that, but some badly designed PV
6721 * devices (vmware backdoor interface) need this to work.
6722 */
6723 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
6724 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6725 }
6726 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
6727 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
6728 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
6729 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
6730 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
6731 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
6732 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
6733 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
6734 #ifdef CONFIG_X86_64
6735 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
6736 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
6737 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
6738 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
6739 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
6740 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
6741 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
6742 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
6743 #endif
6744
6745 regs->rip = kvm_rip_read(vcpu);
6746 regs->rflags = kvm_get_rflags(vcpu);
6747
6748 return 0;
6749 }
6750
6751 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
6752 {
6753 vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
6754 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6755
6756 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
6757 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
6758 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
6759 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
6760 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
6761 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
6762 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
6763 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
6764 #ifdef CONFIG_X86_64
6765 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
6766 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
6767 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
6768 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
6769 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
6770 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
6771 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
6772 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
6773 #endif
6774
6775 kvm_rip_write(vcpu, regs->rip);
6776 kvm_set_rflags(vcpu, regs->rflags);
6777
6778 vcpu->arch.exception.pending = false;
6779
6780 kvm_make_request(KVM_REQ_EVENT, vcpu);
6781
6782 return 0;
6783 }
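/*
 * These two helpers back the KVM_GET_REGS/KVM_SET_REGS vcpu ioctls;
 * typical userspace usage (sketch, error handling omitted):
 *
 *	struct kvm_regs regs;
 *
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.rip = entry_point;
 *	regs.rflags = 0x2;	// bit 1 of RFLAGS is reserved, must be set
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */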
6784
6785 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
6786 {
6787 struct kvm_segment cs;
6788
6789 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
6790 *db = cs.db;
6791 *l = cs.l;
6792 }
6793 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
6794
6795 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
6796 struct kvm_sregs *sregs)
6797 {
6798 struct desc_ptr dt;
6799
6800 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
6801 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
6802 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
6803 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
6804 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
6805 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
6806
6807 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
6808 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
6809
6810 kvm_x86_ops->get_idt(vcpu, &dt);
6811 sregs->idt.limit = dt.size;
6812 sregs->idt.base = dt.address;
6813 kvm_x86_ops->get_gdt(vcpu, &dt);
6814 sregs->gdt.limit = dt.size;
6815 sregs->gdt.base = dt.address;
6816
6817 sregs->cr0 = kvm_read_cr0(vcpu);
6818 sregs->cr2 = vcpu->arch.cr2;
6819 sregs->cr3 = kvm_read_cr3(vcpu);
6820 sregs->cr4 = kvm_read_cr4(vcpu);
6821 sregs->cr8 = kvm_get_cr8(vcpu);
6822 sregs->efer = vcpu->arch.efer;
6823 sregs->apic_base = kvm_get_apic_base(vcpu);
6824
6825 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
6826
6827 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
6828 set_bit(vcpu->arch.interrupt.nr,
6829 (unsigned long *)sregs->interrupt_bitmap);
6830
6831 return 0;
6832 }
6833
6834 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
6835 struct kvm_mp_state *mp_state)
6836 {
6837 kvm_apic_accept_events(vcpu);
6838 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
6839 vcpu->arch.pv.pv_unhalted)
6840 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
6841 else
6842 mp_state->mp_state = vcpu->arch.mp_state;
6843
6844 return 0;
6845 }
6846
6847 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
6848 struct kvm_mp_state *mp_state)
6849 {
6850 if (!kvm_vcpu_has_lapic(vcpu) &&
6851 mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
6852 return -EINVAL;
6853
6854 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
6855 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
6856 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
6857 } else
6858 vcpu->arch.mp_state = mp_state->mp_state;
6859 kvm_make_request(KVM_REQ_EVENT, vcpu);
6860 return 0;
6861 }
6862
6863 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
6864 int reason, bool has_error_code, u32 error_code)
6865 {
6866 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
6867 int ret;
6868
6869 init_emulate_ctxt(vcpu);
6870
6871 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
6872 has_error_code, error_code);
6873
6874 if (ret)
6875 return EMULATE_FAIL;
6876
6877 kvm_rip_write(vcpu, ctxt->eip);
6878 kvm_set_rflags(vcpu, ctxt->eflags);
6879 kvm_make_request(KVM_REQ_EVENT, vcpu);
6880 return EMULATE_DONE;
6881 }
6882 EXPORT_SYMBOL_GPL(kvm_task_switch);
6883
6884 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
6885 struct kvm_sregs *sregs)
6886 {
6887 struct msr_data apic_base_msr;
6888 int mmu_reset_needed = 0;
6889 int pending_vec, max_bits, idx;
6890 struct desc_ptr dt;
6891
6892 if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
6893 return -EINVAL;
6894
6895 dt.size = sregs->idt.limit;
6896 dt.address = sregs->idt.base;
6897 kvm_x86_ops->set_idt(vcpu, &dt);
6898 dt.size = sregs->gdt.limit;
6899 dt.address = sregs->gdt.base;
6900 kvm_x86_ops->set_gdt(vcpu, &dt);
6901
6902 vcpu->arch.cr2 = sregs->cr2;
6903 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
6904 vcpu->arch.cr3 = sregs->cr3;
6905 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
6906
6907 kvm_set_cr8(vcpu, sregs->cr8);
6908
6909 mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
6910 kvm_x86_ops->set_efer(vcpu, sregs->efer);
6911 apic_base_msr.data = sregs->apic_base;
6912 apic_base_msr.host_initiated = true;
6913 kvm_set_apic_base(vcpu, &apic_base_msr);
6914
6915 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
6916 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
6917 vcpu->arch.cr0 = sregs->cr0;
6918
6919 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
6920 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
6921 if (sregs->cr4 & X86_CR4_OSXSAVE)
6922 kvm_update_cpuid(vcpu);
6923
6924 idx = srcu_read_lock(&vcpu->kvm->srcu);
6925 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
6926 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
6927 mmu_reset_needed = 1;
6928 }
6929 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6930
6931 if (mmu_reset_needed)
6932 kvm_mmu_reset_context(vcpu);
6933
6934 max_bits = KVM_NR_INTERRUPTS;
6935 pending_vec = find_first_bit(
6936 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
6937 if (pending_vec < max_bits) {
6938 kvm_queue_interrupt(vcpu, pending_vec, false);
6939 pr_debug("Set back pending irq %d\n", pending_vec);
6940 }
6941
6942 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
6943 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
6944 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
6945 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
6946 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
6947 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
6948
6949 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
6950 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
6951
6952 update_cr8_intercept(vcpu);
6953
6954 /* Older userspace won't unhalt the vcpu on reset. */
6955 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
6956 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
6957 !is_protmode(vcpu))
6958 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6959
6960 kvm_make_request(KVM_REQ_EVENT, vcpu);
6961
6962 return 0;
6963 }
6964
6965 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
6966 struct kvm_guest_debug *dbg)
6967 {
6968 unsigned long rflags;
6969 int i, r;
6970
6971 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
6972 r = -EBUSY;
6973 if (vcpu->arch.exception.pending)
6974 goto out;
6975 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
6976 kvm_queue_exception(vcpu, DB_VECTOR);
6977 else
6978 kvm_queue_exception(vcpu, BP_VECTOR);
6979 }
6980
6981 /*
6982 * Read rflags as long as potentially injected trace flags are still
6983 * filtered out.
6984 */
6985 rflags = kvm_get_rflags(vcpu);
6986
6987 vcpu->guest_debug = dbg->control;
6988 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
6989 vcpu->guest_debug = 0;
6990
6991 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
6992 for (i = 0; i < KVM_NR_DB_REGS; ++i)
6993 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
6994 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
6995 } else {
6996 for (i = 0; i < KVM_NR_DB_REGS; i++)
6997 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
6998 }
6999 kvm_update_dr7(vcpu);
7000
7001 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7002 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
7003 get_segment_base(vcpu, VCPU_SREG_CS);
7004
7005 /*
7006 * Trigger an rflags update that will inject or remove the trace
7007 * flags.
7008 */
7009 kvm_set_rflags(vcpu, rflags);
7010
7011 kvm_x86_ops->update_db_bp_intercept(vcpu);
7012
7013 r = 0;
7014
7015 out:
7016
7017 return r;
7018 }
7019
7020 /*
7021 * Translate a guest virtual address to a guest physical address.
7022 */
7023 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
7024 struct kvm_translation *tr)
7025 {
7026 unsigned long vaddr = tr->linear_address;
7027 gpa_t gpa;
7028 int idx;
7029
7030 idx = srcu_read_lock(&vcpu->kvm->srcu);
7031 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
7032 srcu_read_unlock(&vcpu->kvm->srcu, idx);
7033 tr->physical_address = gpa;
7034 tr->valid = gpa != UNMAPPED_GVA;
7035 tr->writeable = 1;
7036 tr->usermode = 0;
7037
7038 return 0;
7039 }
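/*
 * This is the backend of the KVM_TRANSLATE vcpu ioctl, e.g.
 * (sketch):
 *
 *	struct kvm_translation tr = { .linear_address = gva };
 *
 *	ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
 *	if (tr.valid)
 *		// tr.physical_address now holds the guest physical address
 */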
7040
7041 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
7042 {
7043 struct fxregs_state *fxsave =
7044 &vcpu->arch.guest_fpu.state.fxsave;
7045
7046 memcpy(fpu->fpr, fxsave->st_space, 128);
7047 fpu->fcw = fxsave->cwd;
7048 fpu->fsw = fxsave->swd;
7049 fpu->ftwx = fxsave->twd;
7050 fpu->last_opcode = fxsave->fop;
7051 fpu->last_ip = fxsave->rip;
7052 fpu->last_dp = fxsave->rdp;
7053 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
7054
7055 return 0;
7056 }
7057
7058 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
7059 {
7060 struct fxregs_state *fxsave =
7061 &vcpu->arch.guest_fpu.state.fxsave;
7062
7063 memcpy(fxsave->st_space, fpu->fpr, 128);
7064 fxsave->cwd = fpu->fcw;
7065 fxsave->swd = fpu->fsw;
7066 fxsave->twd = fpu->ftwx;
7067 fxsave->fop = fpu->last_opcode;
7068 fxsave->rip = fpu->last_ip;
7069 fxsave->rdp = fpu->last_dp;
7070 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
7071
7072 return 0;
7073 }
7074
7075 static void fx_init(struct kvm_vcpu *vcpu)
7076 {
7077 fpstate_init(&vcpu->arch.guest_fpu.state);
7078 if (cpu_has_xsaves)
7079 vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
7080 host_xcr0 | XSTATE_COMPACTION_ENABLED;
7081
7082 /*
7083 * Ensure guest xcr0 is valid for loading
7084 */
7085 vcpu->arch.xcr0 = XSTATE_FP;
7086
7087 vcpu->arch.cr0 |= X86_CR0_ET;
7088 }
7089
7090 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
7091 {
7092 if (vcpu->guest_fpu_loaded)
7093 return;
7094
7095 /*
7096 * Restore all possible guest states, and assume the
7097 * host may have used all available bits.
7098 * The guest xcr0 will be loaded later.
7099 */
7100 kvm_put_guest_xcr0(vcpu);
7101 vcpu->guest_fpu_loaded = 1;
7102 __kernel_fpu_begin();
7103 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
7104 trace_kvm_fpu(1);
7105 }
7106
7107 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7108 {
7109 kvm_put_guest_xcr0(vcpu);
7110
7111 if (!vcpu->guest_fpu_loaded) {
7112 vcpu->fpu_counter = 0;
7113 return;
7114 }
7115
7116 vcpu->guest_fpu_loaded = 0;
7117 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
7118 __kernel_fpu_end();
7119 ++vcpu->stat.fpu_reload;
7120 /*
7121 * If using eager FPU mode, or if the guest is a frequent user
7122 * of the FPU, just leave the FPU active for next time.
7123 * fpu_counter rolls over to 0 every 255 uses, so a guest that
7124 * uses the FPU in bursts will revert to loading it on demand.
7125 */
7126 if (!vcpu->arch.eager_fpu) {
7127 if (++vcpu->fpu_counter < 5)
7128 kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
7129 }
7130 trace_kvm_fpu(0);
7131 }
7132
7133 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
7134 {
7135 kvmclock_reset(vcpu);
7136
7137 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
7138 kvm_x86_ops->vcpu_free(vcpu);
7139 }
7140
7141 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
7142 unsigned int id)
7143 {
7144 struct kvm_vcpu *vcpu;
7145
7146 if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
7147 printk_once(KERN_WARNING
7148 "kvm: SMP vm created on host with unstable TSC; "
7149 "guest TSC will not be reliable\n");
7150
7151 vcpu = kvm_x86_ops->vcpu_create(kvm, id);
7152
7153 return vcpu;
7154 }
7155
7156 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
7157 {
7158 int r;
7159
7160 kvm_vcpu_mtrr_init(vcpu);
7161 r = vcpu_load(vcpu);
7162 if (r)
7163 return r;
7164 kvm_vcpu_reset(vcpu, false);
7165 kvm_mmu_setup(vcpu);
7166 vcpu_put(vcpu);
7167 return r;
7168 }
7169
7170 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
7171 {
7172 struct msr_data msr;
7173 struct kvm *kvm = vcpu->kvm;
7174
7175 if (vcpu_load(vcpu))
7176 return;
7177 msr.data = 0x0;
7178 msr.index = MSR_IA32_TSC;
7179 msr.host_initiated = true;
7180 kvm_write_tsc(vcpu, &msr);
7181 vcpu_put(vcpu);
7182
7183 if (!kvmclock_periodic_sync)
7184 return;
7185
7186 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
7187 KVMCLOCK_SYNC_PERIOD);
7188 }
7189
7190 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
7191 {
7192 int r;
7193 vcpu->arch.apf.msr_val = 0;
7194
7195 r = vcpu_load(vcpu);
7196 BUG_ON(r);
7197 kvm_mmu_unload(vcpu);
7198 vcpu_put(vcpu);
7199
7200 kvm_x86_ops->vcpu_free(vcpu);
7201 }
7202
7203 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
7204 {
7205 vcpu->arch.hflags = 0;
7206
7207 atomic_set(&vcpu->arch.nmi_queued, 0);
7208 vcpu->arch.nmi_pending = 0;
7209 vcpu->arch.nmi_injected = false;
7210 kvm_clear_interrupt_queue(vcpu);
7211 kvm_clear_exception_queue(vcpu);
7212
7213 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
7214 kvm_update_dr0123(vcpu);
7215 vcpu->arch.dr6 = DR6_INIT;
7216 kvm_update_dr6(vcpu);
7217 vcpu->arch.dr7 = DR7_FIXED_1;
7218 kvm_update_dr7(vcpu);
7219
7220 vcpu->arch.cr2 = 0;
7221
7222 kvm_make_request(KVM_REQ_EVENT, vcpu);
7223 vcpu->arch.apf.msr_val = 0;
7224 vcpu->arch.st.msr_val = 0;
7225
7226 kvmclock_reset(vcpu);
7227
7228 kvm_clear_async_pf_completion_queue(vcpu);
7229 kvm_async_pf_hash_reset(vcpu);
7230 vcpu->arch.apf.halted = false;
7231
7232 if (!init_event) {
7233 kvm_pmu_reset(vcpu);
7234 vcpu->arch.smbase = 0x30000;
7235 }
7236
7237 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
7238 vcpu->arch.regs_avail = ~0;
7239 vcpu->arch.regs_dirty = ~0;
7240
7241 kvm_x86_ops->vcpu_reset(vcpu, init_event);
7242 }
7243
7244 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
7245 {
7246 struct kvm_segment cs;
7247
7248 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
7249 cs.selector = vector << 8;
7250 cs.base = vector << 12;
7251 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
7252 kvm_rip_write(vcpu, 0);
7253 }
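/*
 * Per the INIT-SIPI protocol, vector V starts the AP at physical
 * address V << 12; e.g. vector 0x9a enters real mode with
 * CS.selector = 0x9a00, CS.base = 0x9a000 and IP = 0.
 */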
7254
7255 int kvm_arch_hardware_enable(void)
7256 {
7257 struct kvm *kvm;
7258 struct kvm_vcpu *vcpu;
7259 int i;
7260 int ret;
7261 u64 local_tsc;
7262 u64 max_tsc = 0;
7263 bool stable, backwards_tsc = false;
7264
7265 kvm_shared_msr_cpu_online();
7266 ret = kvm_x86_ops->hardware_enable();
7267 if (ret != 0)
7268 return ret;
7269
7270 local_tsc = rdtsc();
7271 stable = !check_tsc_unstable();
7272 list_for_each_entry(kvm, &vm_list, vm_list) {
7273 kvm_for_each_vcpu(i, vcpu, kvm) {
7274 if (!stable && vcpu->cpu == smp_processor_id())
7275 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
7276 if (stable && vcpu->arch.last_host_tsc > local_tsc) {
7277 backwards_tsc = true;
7278 if (vcpu->arch.last_host_tsc > max_tsc)
7279 max_tsc = vcpu->arch.last_host_tsc;
7280 }
7281 }
7282 }
7283
7284 /*
7285 * Sometimes, even reliable TSCs go backwards. This happens on
7286 * platforms that reset TSC during suspend or hibernate actions, but
7287 * maintain synchronization. We must compensate. Fortunately, we can
7288 * detect that condition here, which happens early in CPU bringup,
7289 * before any KVM threads can be running. Unfortunately, we can't
7290 * bring the TSCs fully up to date with real time, as we aren't yet far
7291 * enough into CPU bringup that we know how much real time has actually
7292 * elapsed; our helper function, get_kernel_ns() will be using boot
7293 * variables that haven't been updated yet.
7294 *
7295 * So we simply find the maximum observed TSC above, then record the
7296 * adjustment to TSC in each VCPU. When the VCPU later gets loaded,
7297 * the adjustment will be applied. Note that we accumulate
7298 * adjustments, in case multiple suspend cycles happen before some VCPU
7299 * gets a chance to run again. In the event that no KVM threads get a
7300 * chance to run, we will miss the entire elapsed period, as we'll have
7301 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
7302 * lose cycle time. This isn't too big a deal, since the loss will be
7303 * uniform across all VCPUs (not to mention the scenario is extremely
7304 * unlikely). It is possible that a second hibernate recovery happens
7305 * much faster than a first, causing the observed TSC here to be
7306 * smaller; this would require additional padding adjustment, which is
7307 * why we set last_host_tsc to the local tsc observed here.
7308 *
7309 * N.B. - this code below runs only on platforms with reliable TSC,
7310 * as that is the only way backwards_tsc is set above. Also note
7311 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
7312 * have the same delta_cyc adjustment applied if backwards_tsc
7313 * is detected. Note further, this adjustment is only done once,
7314 * as we reset last_host_tsc on all VCPUs to stop this from being
7315 * called multiple times (one for each physical CPU bringup).
7316 *
7317 * Platforms with unreliable TSCs don't have to deal with this, they
7318 * will be compensated by the logic in vcpu_load, which sets the TSC to
7319 * catchup mode. This will catchup all VCPUs to real time, but cannot
7320 * guarantee that they stay in perfect synchronization.
7321 */
7322 if (backwards_tsc) {
7323 u64 delta_cyc = max_tsc - local_tsc;
7324 backwards_tsc_observed = true;
7325 list_for_each_entry(kvm, &vm_list, vm_list) {
7326 kvm_for_each_vcpu(i, vcpu, kvm) {
7327 vcpu->arch.tsc_offset_adjustment += delta_cyc;
7328 vcpu->arch.last_host_tsc = local_tsc;
7329 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
7330 }
7331
7332 /*
7333 * We have to disable TSC offset matching; if you were
7334 * booting a VM while issuing an S4 host suspend,
7335 * you may have a problem. Solving this issue is
7336 * left as an exercise to the reader.
7337 */
7338 kvm->arch.last_tsc_nsec = 0;
7339 kvm->arch.last_tsc_write = 0;
7340 }
7341
7342 }
7343 return 0;
7344 }
7345
7346 void kvm_arch_hardware_disable(void)
7347 {
7348 kvm_x86_ops->hardware_disable();
7349 drop_user_return_notifiers();
7350 }
7351
7352 int kvm_arch_hardware_setup(void)
7353 {
7354 int r;
7355
7356 r = kvm_x86_ops->hardware_setup();
7357 if (r != 0)
7358 return r;
7359
7360 kvm_init_msr_list();
7361 return 0;
7362 }
7363
7364 void kvm_arch_hardware_unsetup(void)
7365 {
7366 kvm_x86_ops->hardware_unsetup();
7367 }
7368
7369 void kvm_arch_check_processor_compat(void *rtn)
7370 {
7371 kvm_x86_ops->check_processor_compatibility(rtn);
7372 }
7373
7374 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
7375 {
7376 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
7377 }
7378 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
7379
7380 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
7381 {
7382 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
7383 }
7384
7385 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
7386 {
7387 return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu);
7388 }
7389
7390 struct static_key kvm_no_apic_vcpu __read_mostly;
7391
7392 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
7393 {
7394 struct page *page;
7395 struct kvm *kvm;
7396 int r;
7397
7398 BUG_ON(vcpu->kvm == NULL);
7399 kvm = vcpu->kvm;
7400
7401 vcpu->arch.pv.pv_unhalted = false;
7402 vcpu->arch.emulate_ctxt.ops = &emulate_ops;
7403 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
7404 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
7405 else
7406 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
7407
7408 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
7409 if (!page) {
7410 r = -ENOMEM;
7411 goto fail;
7412 }
7413 vcpu->arch.pio_data = page_address(page);
7414
7415 kvm_set_tsc_khz(vcpu, max_tsc_khz);
7416
7417 r = kvm_mmu_create(vcpu);
7418 if (r < 0)
7419 goto fail_free_pio_data;
7420
7421 if (irqchip_in_kernel(kvm)) {
7422 r = kvm_create_lapic(vcpu);
7423 if (r < 0)
7424 goto fail_mmu_destroy;
7425 } else
7426 static_key_slow_inc(&kvm_no_apic_vcpu);
7427
7428 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
7429 GFP_KERNEL);
7430 if (!vcpu->arch.mce_banks) {
7431 r = -ENOMEM;
7432 goto fail_free_lapic;
7433 }
7434 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
7435
7436 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
7437 r = -ENOMEM;
7438 goto fail_free_mce_banks;
7439 }
7440
7441 fx_init(vcpu);
7442
7443 vcpu->arch.ia32_tsc_adjust_msr = 0x0;
7444 vcpu->arch.pv_time_enabled = false;
7445
7446 vcpu->arch.guest_supported_xcr0 = 0;
7447 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
7448
7449 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
7450
7451 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
7452
7453 kvm_async_pf_hash_reset(vcpu);
7454 kvm_pmu_init(vcpu);
7455
7456 vcpu->arch.pending_external_vector = -1;
7457
7458 return 0;
7459
7460 fail_free_mce_banks:
7461 kfree(vcpu->arch.mce_banks);
7462 fail_free_lapic:
7463 kvm_free_lapic(vcpu);
7464 fail_mmu_destroy:
7465 kvm_mmu_destroy(vcpu);
7466 fail_free_pio_data:
7467 free_page((unsigned long)vcpu->arch.pio_data);
7468 fail:
7469 return r;
7470 }
7471
7472 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
7473 {
7474 int idx;
7475
7476 kvm_pmu_destroy(vcpu);
7477 kfree(vcpu->arch.mce_banks);
7478 kvm_free_lapic(vcpu);
7479 idx = srcu_read_lock(&vcpu->kvm->srcu);
7480 kvm_mmu_destroy(vcpu);
7481 srcu_read_unlock(&vcpu->kvm->srcu, idx);
7482 free_page((unsigned long)vcpu->arch.pio_data);
7483 if (!lapic_in_kernel(vcpu))
7484 static_key_slow_dec(&kvm_no_apic_vcpu);
7485 }
7486
7487 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
7488 {
7489 kvm_x86_ops->sched_in(vcpu, cpu);
7490 }
7491
7492 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
7493 {
7494 if (type)
7495 return -EINVAL;
7496
7497 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
7498 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
7499 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
7500 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
7501 atomic_set(&kvm->arch.noncoherent_dma_count, 0);
7502
7503 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
7504 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
7505 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
7506 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
7507 &kvm->arch.irq_sources_bitmap);
7508
7509 raw_spin_lock_init(&kvm->arch.tsc_write_lock);
7510 mutex_init(&kvm->arch.apic_map_lock);
7511 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
7512
7513 pvclock_update_vm_gtod_copy(kvm);
7514
7515 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
7516 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
7517
7518 return 0;
7519 }
7520
7521 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
7522 {
7523 int r;
7524 r = vcpu_load(vcpu);
7525 BUG_ON(r);
7526 kvm_mmu_unload(vcpu);
7527 vcpu_put(vcpu);
7528 }
7529
7530 static void kvm_free_vcpus(struct kvm *kvm)
7531 {
7532 unsigned int i;
7533 struct kvm_vcpu *vcpu;
7534
7535 /*
7536 * Unpin any mmu pages first.
7537 */
7538 kvm_for_each_vcpu(i, vcpu, kvm) {
7539 kvm_clear_async_pf_completion_queue(vcpu);
7540 kvm_unload_vcpu_mmu(vcpu);
7541 }
7542 kvm_for_each_vcpu(i, vcpu, kvm)
7543 kvm_arch_vcpu_free(vcpu);
7544
7545 mutex_lock(&kvm->lock);
7546 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
7547 kvm->vcpus[i] = NULL;
7548
7549 atomic_set(&kvm->online_vcpus, 0);
7550 mutex_unlock(&kvm->lock);
7551 }
7552
7553 void kvm_arch_sync_events(struct kvm *kvm)
7554 {
7555 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
7556 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
7557 kvm_free_all_assigned_devices(kvm);
7558 kvm_free_pit(kvm);
7559 }
7560
7561 int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
7562 {
7563 int i, r;
7564 unsigned long hva;
7565 struct kvm_memslots *slots = kvm_memslots(kvm);
7566 struct kvm_memory_slot *slot, old;
7567
7568 /* Called with kvm->slots_lock held. */
7569 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
7570 return -EINVAL;
7571
7572 slot = id_to_memslot(slots, id);
7573 if (size) {
7574 if (WARN_ON(slot->npages))
7575 return -EEXIST;
7576
7577 /*
7578 * MAP_SHARED to prevent internal slot pages from being moved
7579 * by fork()/COW.
7580 */
7581 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
7582 MAP_SHARED | MAP_ANONYMOUS, 0);
7583 if (IS_ERR((void *)hva))
7584 return PTR_ERR((void *)hva);
7585 } else {
7586 if (!slot->npages)
7587 return 0;
7588
7589 hva = 0;
7590 }
7591
7592 old = *slot;
7593 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
7594 struct kvm_userspace_memory_region m;
7595
7596 m.slot = id | (i << 16);
7597 m.flags = 0;
7598 m.guest_phys_addr = gpa;
7599 m.userspace_addr = hva;
7600 m.memory_size = size;
7601 r = __kvm_set_memory_region(kvm, &m);
7602 if (r < 0)
7603 return r;
7604 }
7605
7606 if (!size) {
7607 r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
7608 WARN_ON(r < 0);
7609 }
7610
7611 return 0;
7612 }
7613 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
7614
7615 int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
7616 {
7617 int r;
7618
7619 mutex_lock(&kvm->slots_lock);
7620 r = __x86_set_memory_region(kvm, id, gpa, size);
7621 mutex_unlock(&kvm->slots_lock);
7622
7623 return r;
7624 }
7625 EXPORT_SYMBOL_GPL(x86_set_memory_region);
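/*
 * In-kernel callers use this to back the private memslots with
 * anonymous memory; the VMX real-mode TSS setup, for instance, does
 * roughly:
 *
 *	x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
 *			      PAGE_SIZE * 3);
 *
 * Passing size == 0 deletes the slot again, as kvm_arch_destroy_vm()
 * does below.
 */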
7626
7627 void kvm_arch_destroy_vm(struct kvm *kvm)
7628 {
7629 if (current->mm == kvm->mm) {
7630 /*
7631 * Free memory regions allocated on behalf of userspace,
7632 * unless the memory map has changed due to process exit
7633 * or fd copying.
7634 */
7635 x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
7636 x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
7637 x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
7638 }
7639 kvm_iommu_unmap_guest(kvm);
7640 kfree(kvm->arch.vpic);
7641 kfree(kvm->arch.vioapic);
7642 kvm_free_vcpus(kvm);
7643 kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
7644 }
7645
7646 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
7647 struct kvm_memory_slot *dont)
7648 {
7649 int i;
7650
7651 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
7652 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
7653 kvfree(free->arch.rmap[i]);
7654 free->arch.rmap[i] = NULL;
7655 }
7656 if (i == 0)
7657 continue;
7658
7659 if (!dont || free->arch.lpage_info[i - 1] !=
7660 dont->arch.lpage_info[i - 1]) {
7661 kvfree(free->arch.lpage_info[i - 1]);
7662 free->arch.lpage_info[i - 1] = NULL;
7663 }
7664 }
7665 }
7666
7667 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
7668 unsigned long npages)
7669 {
7670 int i;
7671
7672 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
7673 unsigned long ugfn;
7674 int lpages;
7675 int level = i + 1;
7676
7677 lpages = gfn_to_index(slot->base_gfn + npages - 1,
7678 slot->base_gfn, level) + 1;
7679
7680 slot->arch.rmap[i] =
7681 kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
7682 if (!slot->arch.rmap[i])
7683 goto out_free;
7684 if (i == 0)
7685 continue;
7686
7687 slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
7688 sizeof(*slot->arch.lpage_info[i - 1]));
7689 if (!slot->arch.lpage_info[i - 1])
7690 goto out_free;
7691
7692 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
7693 slot->arch.lpage_info[i - 1][0].write_count = 1;
7694 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
7695 slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
7696 ugfn = slot->userspace_addr >> PAGE_SHIFT;
7697 /*
7698 * If the gfn and userspace address are not aligned wrt each
7699 * other, or if explicitly asked to, disable large page
7700 * support for this slot
7701 */
7702 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
7703 !kvm_largepages_enabled()) {
7704 unsigned long j;
7705
7706 for (j = 0; j < lpages; ++j)
7707 slot->arch.lpage_info[i - 1][j].write_count = 1;
7708 }
7709 }
7710
7711 return 0;
7712
7713 out_free:
7714 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
7715 kvfree(slot->arch.rmap[i]);
7716 slot->arch.rmap[i] = NULL;
7717 if (i == 0)
7718 continue;
7719
7720 kvfree(slot->arch.lpage_info[i - 1]);
7721 slot->arch.lpage_info[i - 1] = NULL;
7722 }
7723 return -ENOMEM;
7724 }
7725
7726 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
7727 {
7728 /*
7729 * memslots->generation has been incremented.
7730 * mmio generation may have reached its maximum value.
7731 */
7732 kvm_mmu_invalidate_mmio_sptes(kvm, slots);
7733 }
7734
7735 int kvm_arch_prepare_memory_region(struct kvm *kvm,
7736 struct kvm_memory_slot *memslot,
7737 const struct kvm_userspace_memory_region *mem,
7738 enum kvm_mr_change change)
7739 {
7740 return 0;
7741 }
7742
7743 static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
7744 struct kvm_memory_slot *new)
7745 {
7746 /* Still write protect RO slot */
7747 if (new->flags & KVM_MEM_READONLY) {
7748 kvm_mmu_slot_remove_write_access(kvm, new);
7749 return;
7750 }
7751
7752 /*
7753 * Call kvm_x86_ops dirty logging hooks when they are valid.
7754 *
7755 * kvm_x86_ops->slot_disable_log_dirty is called when:
7756 *
7757 * - KVM_MR_CREATE with dirty logging is disabled
7758 * - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
7759 *
7760 * The reason is, in case of PML, we need to set D-bit for any slots
7761 * with dirty logging disabled in order to eliminate unnecessary GPA
7762 * logging in the PML buffer (and a potential PML-buffer-full VMEXIT). This
7763 * guarantees that leaving PML enabled for the guest's lifetime won't have
7764 * any additional overhead from PML when the guest is running with dirty
7765 * logging disabled for memory slots.
7766 *
7767 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
7768 * to dirty logging mode.
7769 *
7770 * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
7771 *
7772 * In case of write protect:
7773 *
7774 * Write protect all pages for dirty logging.
7775 *
7776 * All the sptes including the large sptes which point to this
7777 * slot are set to readonly. We can not create any new large
7778 * spte on this slot until the end of the logging.
7779 *
7780 * See the comments in fast_page_fault().
7781 */
7782 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
7783 if (kvm_x86_ops->slot_enable_log_dirty)
7784 kvm_x86_ops->slot_enable_log_dirty(kvm, new);
7785 else
7786 kvm_mmu_slot_remove_write_access(kvm, new);
7787 } else {
7788 if (kvm_x86_ops->slot_disable_log_dirty)
7789 kvm_x86_ops->slot_disable_log_dirty(kvm, new);
7790 }
7791 }
7792
7793 void kvm_arch_commit_memory_region(struct kvm *kvm,
7794 const struct kvm_userspace_memory_region *mem,
7795 const struct kvm_memory_slot *old,
7796 const struct kvm_memory_slot *new,
7797 enum kvm_mr_change change)
7798 {
7799 int nr_mmu_pages = 0;
7800
7801 if (!kvm->arch.n_requested_mmu_pages)
7802 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
7803
7804 if (nr_mmu_pages)
7805 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
7806
7807 /*
7808 * Dirty logging tracks sptes in 4k granularity, meaning that large
7809 * sptes have to be split. If live migration is successful, the guest
7810 * in the source machine will be destroyed and large sptes will be
7811 * created in the destination. However, if the guest continues to run
7812 * in the source machine (for example if live migration fails), small
7813 * sptes will remain around and cause bad performance.
7814 *
7815 * Scan sptes if dirty logging has been stopped, dropping those
7816 * which can be collapsed into a single large-page spte. Later
7817 * page faults will create the large-page sptes.
7818 */
7819 if ((change != KVM_MR_DELETE) &&
7820 (old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
7821 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
7822 kvm_mmu_zap_collapsible_sptes(kvm, new);
7823
7824 /*
7825 * Set up write protection and/or dirty logging for the new slot.
7826 *
7827 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old slot
7828 * have been zapped, so no dirty logging work is needed for it. For
7829 * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the
7830 * new, so it is covered when dealing with the new slot.
7831 *
7832 * FIXME: const-ify all uses of struct kvm_memory_slot.
7833 */
7834 if (change != KVM_MR_DELETE)
7835 kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
7836 }
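
/*
 * Editor's sketch -- not part of x86.c -- of how userspace reaches the
 * KVM_MR_FLAGS_ONLY paths handled above. Re-issuing
 * KVM_SET_USER_MEMORY_REGION for an existing slot with only the flags
 * changed toggles dirty logging; clearing KVM_MEM_LOG_DIRTY_PAGES again
 * (e.g. after a failed live migration) is what leads to
 * kvm_mmu_zap_collapsible_sptes(). 'vm_fd' and the mapped backing
 * memory are assumed to exist already.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_dirty_logging(int vm_fd, __u32 slot, __u64 gpa,
			     __u64 size, void *hva, int enable)
{
	struct kvm_userspace_memory_region region;

	memset(&region, 0, sizeof(region));
	region.slot = slot;
	region.flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
	region.guest_phys_addr = gpa;
	region.memory_size = size;
	region.userspace_addr = (__u64)(unsigned long)hva;

	/* Same slot/gpa/size/hva, new flags => KVM_MR_FLAGS_ONLY. */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}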
7837
7838 void kvm_arch_flush_shadow_all(struct kvm *kvm)
7839 {
7840 kvm_mmu_invalidate_zap_all_pages(kvm);
7841 }
7842
7843 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
7844 struct kvm_memory_slot *slot)
7845 {
7846 kvm_mmu_invalidate_zap_all_pages(kvm);
7847 }
7848
7849 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
7850 {
7851 if (!list_empty_careful(&vcpu->async_pf.done))
7852 return true;
7853
7854 if (kvm_apic_has_events(vcpu))
7855 return true;
7856
7857 if (vcpu->arch.pv.pv_unhalted)
7858 return true;
7859
7860 if (atomic_read(&vcpu->arch.nmi_queued))
7861 return true;
7862
7863 if (test_bit(KVM_REQ_SMI, &vcpu->requests))
7864 return true;
7865
7866 if (kvm_arch_interrupt_allowed(vcpu) &&
7867 kvm_cpu_has_interrupt(vcpu))
7868 return true;
7869
7870 return false;
7871 }
7872
7873 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
7874 {
7875 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
7876 kvm_x86_ops->check_nested_events(vcpu, false);
7877
7878 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
7879 }
7880
7881 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
7882 {
7883 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
7884 }
7885
7886 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
7887 {
7888 return kvm_x86_ops->interrupt_allowed(vcpu);
7889 }
7890
7891 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
7892 {
7893 if (is_64_bit_mode(vcpu))
7894 return kvm_rip_read(vcpu);
7895 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
7896 kvm_rip_read(vcpu));
7897 }
7898 EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
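
/*
 * Editor's worked example with hypothetical values, not part of x86.c:
 * outside 64-bit mode the linear RIP is the 32-bit truncation of the CS
 * base plus RIP. A real-mode selector of 0xf000 has base 0xf0000
 * (selector << 4), so CS:IP = f000:fff0 yields linear 0xffff0 -- the
 * classic real-mode BIOS entry address.
 */
#include <assert.h>

int main(void)
{
	unsigned long cs_base = 0xf000UL << 4, rip = 0xfff0;

	assert((unsigned int)(cs_base + rip) == 0xffff0);
	return 0;
}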
7899
7900 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
7901 {
7902 return kvm_get_linear_rip(vcpu) == linear_rip;
7903 }
7904 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
7905
7906 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
7907 {
7908 unsigned long rflags;
7909
7910 rflags = kvm_x86_ops->get_rflags(vcpu);
7911 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7912 rflags &= ~X86_EFLAGS_TF;
7913 return rflags;
7914 }
7915 EXPORT_SYMBOL_GPL(kvm_get_rflags);
7916
7917 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
7918 {
7919 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
7920 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
7921 rflags |= X86_EFLAGS_TF;
7922 kvm_x86_ops->set_rflags(vcpu, rflags);
7923 }
7924
7925 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
7926 {
7927 __kvm_set_rflags(vcpu, rflags);
7928 kvm_make_request(KVM_REQ_EVENT, vcpu);
7929 }
7930 EXPORT_SYMBOL_GPL(kvm_set_rflags);
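
/*
 * Editor's sketch -- not part of x86.c: the TF juggling in
 * kvm_get_rflags()/__kvm_set_rflags() exists so that single-stepping
 * requested by userspace stays invisible to the guest. 'vcpu_fd' is
 * assumed to be an open vCPU descriptor.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_single_step(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

	/* Each subsequent KVM_RUN should now exit with KVM_EXIT_DEBUG. */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}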
7931
7932 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
7933 {
7934 int r;
7935
7936 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
7937 work->wakeup_all)
7938 return;
7939
7940 r = kvm_mmu_reload(vcpu);
7941 if (unlikely(r))
7942 return;
7943
7944 if (!vcpu->arch.mmu.direct_map &&
7945 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
7946 return;
7947
7948 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
7949 }
7950
7951 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
7952 {
7953 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
7954 }
7955
7956 static inline u32 kvm_async_pf_next_probe(u32 key)
7957 {
7958 return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
7959 }
7960
7961 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
7962 {
7963 u32 key = kvm_async_pf_hash_fn(gfn);
7964
7965 while (vcpu->arch.apf.gfns[key] != ~0)
7966 key = kvm_async_pf_next_probe(key);
7967
7968 vcpu->arch.apf.gfns[key] = gfn;
7969 }
7970
7971 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
7972 {
7973 int i;
7974 u32 key = kvm_async_pf_hash_fn(gfn);
7975
7976 for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
7977 (vcpu->arch.apf.gfns[key] != gfn &&
7978 vcpu->arch.apf.gfns[key] != ~0); i++)
7979 key = kvm_async_pf_next_probe(key);
7980
7981 return key;
7982 }
7983
7984 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
7985 {
7986 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
7987 }
7988
7989 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
7990 {
7991 u32 i, j, k;
7992
7993 i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
7994 while (true) {
7995 vcpu->arch.apf.gfns[i] = ~0;
7996 do {
7997 j = kvm_async_pf_next_probe(j);
7998 if (vcpu->arch.apf.gfns[j] == ~0)
7999 return;
8000 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
8001 /*
8002 * k lies cyclically in ]i,j]
8003 * | i.k.j |
8004 * |....j i.k.| or |.k..j i...|
8005 */
8006 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
8007 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
8008 i = j;
8009 }
8010 }
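
/*
 * Editor's illustrative sketch, not part of x86.c: the deletion above is
 * the classic backward-shift scheme for linear probing. When slot i is
 * emptied, walk forward; an entry at j whose home slot k lies cyclically
 * in ]i, j] is still reachable without passing through i, so it may stay
 * put -- otherwise move it into the hole at i and continue from j. A
 * self-contained toy version on an 8-slot table:
 */
#include <stdio.h>

#define TABLE_SIZE	8	/* power of two, like the apf gfn table */
#define EMPTY		(~0u)

static unsigned int table[TABLE_SIZE];
static unsigned int hash(unsigned int v) { return v & (TABLE_SIZE - 1); }

static void insert(unsigned int v)
{
	unsigned int key = hash(v);

	while (table[key] != EMPTY)
		key = (key + 1) & (TABLE_SIZE - 1);
	table[key] = v;
}

static void delete(unsigned int v)	/* assumes v is present */
{
	unsigned int i, j, k;

	i = j = hash(v);
	while (table[i] != v)		/* find the victim's slot */
		i = j = (i + 1) & (TABLE_SIZE - 1);
	while (1) {
		table[i] = EMPTY;
		do {
			j = (j + 1) & (TABLE_SIZE - 1);
			if (table[j] == EMPTY)
				return;
			k = hash(table[j]);
			/* keep entries whose home slot k is in ]i, j] */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		table[i] = table[j];	/* shift j back into the hole */
		i = j;
	}
}

int main(void)
{
	unsigned int n;

	for (n = 0; n < TABLE_SIZE; n++)
		table[n] = EMPTY;
	/* 8, 16, 24 all hash to slot 0 and probe into slots 0, 1, 2. */
	insert(8); insert(16); insert(24);
	delete(8);			/* 16 and 24 shift back by one */
	for (n = 0; n < TABLE_SIZE; n++)
		if (table[n] != EMPTY)
			printf("slot %u: %u\n", n, table[n]);
	return 0;
}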
8011
8012 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
8013 {
8015 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
8016 sizeof(val));
8017 }
8018
8019 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
8020 struct kvm_async_pf *work)
8021 {
8022 struct x86_exception fault;
8023
8024 trace_kvm_async_pf_not_present(work->arch.token, work->gva);
8025 kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
8026
8027 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
8028 (vcpu->arch.apf.send_user_only &&
8029 kvm_x86_ops->get_cpl(vcpu) == 0))
8030 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
8031 else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
8032 fault.vector = PF_VECTOR;
8033 fault.error_code_valid = true;
8034 fault.error_code = 0;
8035 fault.nested_page_fault = false;
8036 fault.address = work->arch.token;
8037 kvm_inject_page_fault(vcpu, &fault);
8038 }
8039 }
8040
8041 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
8042 struct kvm_async_pf *work)
8043 {
8044 struct x86_exception fault;
8045
8046 trace_kvm_async_pf_ready(work->arch.token, work->gva);
8047 if (work->wakeup_all)
8048 work->arch.token = ~0; /* broadcast wakeup */
8049 else
8050 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
8051
8052 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
8053 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
8054 fault.vector = PF_VECTOR;
8055 fault.error_code_valid = true;
8056 fault.error_code = 0;
8057 fault.nested_page_fault = false;
8058 fault.address = work->arch.token;
8059 kvm_inject_page_fault(vcpu, &fault);
8060 }
8061 vcpu->arch.apf.halted = false;
8062 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
8063 }
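
/*
 * Editor's sketch of the guest side of the protocol above, not part of
 * x86.c. KVM writes a reason word into the per-vCPU area the guest
 * registered via MSR_KVM_ASYNC_PF_EN, then injects a #PF whose CR2 holds
 * the token rather than a faulting address. The shared area and the
 * sleep/wake helpers are stubbed out here so the dispatch is runnable.
 */
#include <stdio.h>

#define KVM_PV_REASON_PAGE_NOT_PRESENT	1	/* from kvm_para.h */
#define KVM_PV_REASON_PAGE_READY	2

static unsigned int apf_reason;		/* stands in for the shared area */

static void guest_page_fault(unsigned long cr2)
{
	unsigned int reason = apf_reason;

	apf_reason = 0;			/* the guest must clear the word */
	switch (reason) {
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		printf("sleep on token %#lx\n", cr2);
		break;
	case KVM_PV_REASON_PAGE_READY:
		printf("wake token %#lx%s\n", cr2,
		       cr2 == ~0ul ? " (broadcast)" : "");
		break;
	default:
		printf("ordinary #PF at %#lx\n", cr2);
	}
}

int main(void)
{
	apf_reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
	guest_page_fault(0x42);		/* token, not an address */
	apf_reason = KVM_PV_REASON_PAGE_READY;
	guest_page_fault(0x42);
	return 0;
}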
8064
8065 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
8066 {
8067 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
8068 return true;
8069 else
8070 return !kvm_event_needs_reinjection(vcpu) &&
8071 kvm_x86_ops->interrupt_allowed(vcpu);
8072 }
8073
8074 void kvm_arch_start_assignment(struct kvm *kvm)
8075 {
8076 atomic_inc(&kvm->arch.assigned_device_count);
8077 }
8078 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
8079
8080 void kvm_arch_end_assignment(struct kvm *kvm)
8081 {
8082 atomic_dec(&kvm->arch.assigned_device_count);
8083 }
8084 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
8085
8086 bool kvm_arch_has_assigned_device(struct kvm *kvm)
8087 {
8088 return atomic_read(&kvm->arch.assigned_device_count);
8089 }
8090 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
8091
8092 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
8093 {
8094 atomic_inc(&kvm->arch.noncoherent_dma_count);
8095 }
8096 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
8097
8098 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
8099 {
8100 atomic_dec(&kvm->arch.noncoherent_dma_count);
8101 }
8102 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
8103
8104 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
8105 {
8106 return atomic_read(&kvm->arch.noncoherent_dma_count);
8107 }
8108 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
8109
8110 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
8111 struct irq_bypass_producer *prod)
8112 {
8113 struct kvm_kernel_irqfd *irqfd =
8114 container_of(cons, struct kvm_kernel_irqfd, consumer);
8115
8116 if (kvm_x86_ops->update_pi_irte) {
8117 irqfd->producer = prod;
8118 return kvm_x86_ops->update_pi_irte(irqfd->kvm,
8119 prod->irq, irqfd->gsi, 1);
8120 }
8121
8122 return -EINVAL;
8123 }
8124
8125 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
8126 struct irq_bypass_producer *prod)
8127 {
8128 int ret;
8129 struct kvm_kernel_irqfd *irqfd =
8130 container_of(cons, struct kvm_kernel_irqfd, consumer);
8131
8132 if (!kvm_x86_ops->update_pi_irte) {
8133 WARN_ON(irqfd->producer != NULL);
8134 return;
8135 }
8136
8137 WARN_ON(irqfd->producer != prod);
8138 irqfd->producer = NULL;
8139
8140 /*
8141 * When the producer of a consumer is unregistered, we change back to
8142 * remapped mode, so we can re-use the current implementation
8143 * when the irq is masked/disabled or the consumer side (KVM
8144 * in this case) doesn't want to receive the interrupts.
8145 */
8146 ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
8147 if (ret)
8148 printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
8149 " failed: %d\n", irqfd->consumer.token, ret);
8150 }
8151
8152 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
8153 uint32_t guest_irq, bool set)
8154 {
8155 if (!kvm_x86_ops->update_pi_irte)
8156 return -EINVAL;
8157
8158 return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
8159 }
8160
8161 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
8162 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
8163 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
8164 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
8165 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
8166 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
8167 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
8168 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
8169 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
8170 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
8171 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
8172 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
8173 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
8174 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
8175 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
8176 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
8177 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);