// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>

#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/internal.h> /* Ugh! */
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <clocksource/hyperv_timer.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);

#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;

#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
				    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);

struct kvm_x86_ops *kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

static bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

bool __read_mostly kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 __read_mostly kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
u8 __read_mostly kvm_tsc_scaling_ratio_frac_bits;
EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
u64 __read_mostly kvm_max_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
u64 __read_mostly kvm_default_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/*
 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
 * adaptive tuning starting from a default advancement of 1000ns. '0' disables
 * advancement entirely. Any other value is used as-is and disables adaptive
 * tuning, i.e. allows privileged userspace to set an exact advancement time.
 */
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);
d0659d94 149
52004014
FW
150static bool __read_mostly vector_hashing = true;
151module_param(vector_hashing, bool, S_IRUGO);
152
c4ae60e4
LA
153bool __read_mostly enable_vmware_backdoor = false;
154module_param(enable_vmware_backdoor, bool, S_IRUGO);
155EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
156
6c86eedc
WL
157static bool __read_mostly force_emulation_prefix = false;
158module_param(force_emulation_prefix, bool, S_IRUGO);
159
0c5f81da
WL
160int __read_mostly pi_inject_timer = -1;
161module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
162
18863bdd
AK
163#define KVM_NR_SHARED_MSRS 16
164
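/*
 * "Shared" MSRs are MSRs whose guest values may be left in hardware
 * after a VM-exit and are only restored to their host values lazily,
 * from a user-return notifier, on the way back to userspace. The
 * global structure records which MSRs participate; the per-CPU
 * structure tracks their current and host values.
 */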
struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;

static u64 __read_mostly host_xss;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "req_event", VCPU_STAT(req_event) },
	{ "l1d_flush", VCPU_STAT(l1d_flush) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages, .mode = 0444) },
	{ "nx_largepages_splitted", VM_STAT(nx_lpage_splits, .mode = 0444) },
	{ "max_mmu_page_hash_collisions",
		VM_STAT(max_mmu_page_hash_collisions) },
	{ NULL }
};

u64 __read_mostly host_xcr0;

struct kmem_cache *x86_fpu_cache;
EXPORT_SYMBOL_GPL(x86_fpu_cache);

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

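/*
 * Restore the host value of every shared MSR that still holds a guest
 * value. Runs from the user-return notifier on the way back to
 * userspace, or directly via drop_user_return_notifiers() when a CPU's
 * virtualization support is being torn down.
 */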
static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;
	unsigned long flags;

	/*
	 * Disabling irqs at this point since the following code could be
	 * interrupted and executed through kvm_arch_hardware_disable()
	 */
	local_irq_save(flags);
	if (locals->registered) {
		locals->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
	shared_msrs_global.msrs[slot] = msr;
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
	u64 value;
	int i;

	for (i = 0; i < shared_msrs_global.nr; ++i) {
		rdmsrl_safe(shared_msrs_global.msrs[i], &value);
		smsr->values[i].host = value;
		smsr->values[i].curr = value;
	}
}

int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
	int err;

	value = (value & mask) | (smsr->values[slot].host & ~mask);
	if (value == smsr->values[slot].curr)
		return 0;
	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
	if (err)
		return 1;

	smsr->values[slot].curr = value;
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
	return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
		return 1;
	if (!msr_info->host_initiated) {
		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
			return 1;
		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
			return 1;
	}

	kvm_lapic_set_base(vcpu, msr_info->data);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

asmlinkage __visible void kvm_spurious_fault(void)
{
	/* Fault while not rebooting. We want the trace. */
	BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

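/*
 * Exception classification, used by kvm_multiple_exception() below to
 * decide whether two exceptions are delivered serially, promoted to a
 * double fault, or escalated to a triple-fault shutdown, per the
 * contributory-exception rules in SDM Table 5-5.
 */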
#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/* #DB is trap, as instruction watchpoints are handled elsewhere */
	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}

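/*
 * Deliver the architectural side effect ("payload") of a pending
 * exception to the vCPU: the new DR6 bits for a #DB, or the faulting
 * address (CR2) for a #PF.
 */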
void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
{
	unsigned nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (!has_payload)
		return;

	switch (nr) {
	case DB_VECTOR:
		/*
		 * "Certain debug exceptions may clear bits 0-3. The
		 * remaining contents of the DR6 register are never
		 * cleared by the processor".
		 */
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
		/*
		 * DR6.RTM is set by all #DB exceptions that don't clear it.
		 */
		vcpu->arch.dr6 |= DR6_RTM;
		vcpu->arch.dr6 |= payload;
		/*
		 * Bit 16 should be set in the payload whenever the #DB
		 * exception should clear DR6.RTM. This makes the payload
		 * compatible with the pending debug exceptions under VMX.
		 * Though not currently documented in the SDM, this also
		 * makes the payload compatible with the exit qualification
		 * for #DB exceptions under VMX.
		 */
		vcpu->arch.dr6 ^= payload & DR6_RTM;
		break;
	case PF_VECTOR:
		vcpu->arch.cr2 = payload;
		break;
	}

	vcpu->arch.exception.has_payload = false;
	vcpu->arch.exception.payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool has_payload, unsigned long payload, bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
	queue:
		if (has_error && !is_protmode(vcpu))
			has_error = false;
		if (reinject) {
			/*
			 * On vmentry, vcpu->arch.exception.pending is only
			 * true if an event injection was blocked by
			 * nested_run_pending.  In that case, however,
			 * vcpu_enter_guest requests an immediate exit,
			 * and the guest shouldn't proceed far enough to
			 * need reinjection.
			 */
			WARN_ON_ONCE(vcpu->arch.exception.pending);
			vcpu->arch.exception.injected = true;
			if (WARN_ON_ONCE(has_payload)) {
				/*
				 * A reinjected event has already
				 * delivered its payload.
				 */
				has_payload = false;
				payload = 0;
			}
		} else {
			vcpu->arch.exception.pending = true;
			vcpu->arch.exception.injected = false;
		}
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.has_payload = has_payload;
		vcpu->arch.exception.payload = payload;
		/*
		 * In guest mode, payload delivery should be deferred,
		 * so that the L1 hypervisor can intercept #PF before
		 * CR2 is modified (or intercept #DB before DR6 is
		 * modified under nVMX). However, for ABI
		 * compatibility with KVM_GET_VCPU_EVENTS and
		 * KVM_SET_VCPU_EVENTS, we can't delay payload
		 * delivery unless userspace has enabled this
		 * functionality via the per-VM capability,
		 * KVM_CAP_EXCEPTION_PAYLOAD.
		 */
		if (!vcpu->kvm->arch.exception_payload_enabled ||
		    !is_guest_mode(vcpu))
			kvm_deliver_exception_payload(vcpu);
		return;
	}

	/* Check how the new exception interacts with the one already queued. */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/*
		 * Generate double fault per SDM Table 5-5.  Set
		 * exception.pending = true so that the double fault
		 * can trigger a nested vmexit.
		 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.injected = false;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
		vcpu->arch.exception.has_payload = false;
		vcpu->arch.exception.payload = 0;
	} else
		/*
		 * Replace the previous exception with the new one, in the
		 * hope that instruction re-execution will regenerate the
		 * lost exception.
		 */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
				  unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, true, error_code,
			       true, payload, false);
}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.exception.nested_apf =
		is_guest_mode(vcpu) && fault->async_page_fault;
	if (vcpu->arch.exception.nested_apf) {
		vcpu->arch.apf.nested_apf_token = fault->address;
		kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
	} else {
		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
					fault->address);
	}
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu->inject_page_fault(vcpu, fault);

	return fault->nested_page_fault;
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

/*
 * This function is used to read from the physical memory of the currently
 * running guest. The difference from kvm_vcpu_read_guest_page is that this
 * function can read from guest physical memory or from the guest's guest
 * physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	struct x86_exception exception;
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			       void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}

static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
	       rsvd_bits(1, 2);
}

/*
 * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);

out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

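/*
 * Return true if the guest may have modified its in-memory PDPTEs since
 * they were last loaded; only meaningful under PAE paging.
 */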
bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
	int offset;
	gfn_t gfn;
	int r;

	if (!is_pae_paging(vcpu))
		return false;

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		return true;

	gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
	if (r < 0)
		return true;

	return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
}
EXPORT_SYMBOL_GPL(pdptrs_changed);

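/*
 * Emulate a guest write to CR0: reject reserved bits and illegal mode
 * transitions, hand the accepted value to vendor code, and reset the
 * MMU or zap mappings when paging-related bits change. Returns 0 on
 * success, 1 if the write must fault.
 */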
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
	}
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, host_xss);
	}
}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);

static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see fx_init).
	 */
	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
		return 1;

	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
			return 1;
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
			return 1;
	}
	vcpu->arch.xcr0 = xcr0;

	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
		kvm_update_cpuid(vcpu);
	return 0;
}

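/* XSETBV emulation: only CPL 0 may write XCRs; invalid writes inject #GP. */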
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

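/*
 * Build the CR4 reserved-bit mask from a feature predicate, so the same
 * logic can be evaluated against host cpuinfo (cpu_has) and against
 * guest CPUID (guest_cpuid_has).
 */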
#define __cr4_reserved_bits(__cpu_has, __c)		\
({							\
	u64 __reserved_bits = CR4_RESERVED_BITS;	\
							\
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
		__reserved_bits |= X86_CR4_OSXSAVE;	\
	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
		__reserved_bits |= X86_CR4_SMEP;	\
	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
		__reserved_bits |= X86_CR4_SMAP;	\
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
		__reserved_bits |= X86_CR4_FSGSBASE;	\
	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
		__reserved_bits |= X86_CR4_PKE;		\
	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
		__reserved_bits |= X86_CR4_LA57;	\
	__reserved_bits;				\
})

static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
{
	u64 reserved_bits = __cr4_reserved_bits(cpu_has, c);

	if (cpuid_ecx(0x7) & feature_bit(LA57))
		reserved_bits &= ~X86_CR4_LA57;

	if (kvm_x86_ops->umip_emulated())
		reserved_bits &= ~X86_CR4_UMIP;

	return reserved_bits;
}

static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & cr4_reserved_bits)
		return -EINVAL;

	if (cr4 & __cr4_reserved_bits(guest_cpuid_has, vcpu))
		return -EINVAL;

	return 0;
}

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;

	if (kvm_valid_cr4(vcpu, cr4))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
		kvm_update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

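/*
 * Emulate a guest CR3 write. With PCIDs enabled, bit 63 of the value
 * requests a TLB-preserving switch (NOFLUSH) and is stripped before the
 * address is validated and installed.
 */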
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	bool skip_tlb_flush = false;
#ifdef CONFIG_X86_64
	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

	if (pcid_enabled) {
		skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
		cr3 &= ~X86_CR3_PCID_NOFLUSH;
	}
#endif

	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		if (!skip_tlb_flush) {
			kvm_mmu_sync_roots(vcpu);
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}
		return 0;
	}

	if (is_long_mode(vcpu) &&
	    (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
		return 1;
	else if (is_pae_paging(vcpu) &&
		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
		return 1;

	kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush);
	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (lapic_in_kernel(vcpu))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
	}
}

static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
}

static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}

static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
		fixed |= DR6_RTM;
	return fixed;
}

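/*
 * Debug-register accessors. DR4 and DR5 alias DR6 and DR7 in the cases
 * below; kvm_require_dr() has already injected #UD for those encodings
 * when CR4.DE is set.
 */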
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[array_index_nospec(dr, size)] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
		kvm_update_dr6(vcpu);
		break;
	case 5:
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	if (__kvm_set_dr(vcpu, dr, val)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[array_index_nospec(dr, size)];
		break;
	case 4:
		/* fall through */
	case 6:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			*val = vcpu->arch.dr6;
		else
			*val = kvm_x86_ops->get_dr6(vcpu);
		break;
	case 5:
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data;
	int err;

	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_rax_write(vcpu, (u32)data);
	kvm_rdx_write(vcpu, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features)
 * extract the supported MSRs from the related const lists.
 * msrs_to_save is selected from msrs_to_save_all to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
 * may depend on host virtualization features rather than host cpu features.
 */

static const u32 msrs_to_save_all[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
	MSR_IA32_SPEC_CTRL,
	MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
	MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
	MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
	MSR_IA32_UMWAIT_CONTROL,

	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
	MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
	MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
	MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9,
	MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11,
	MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
	MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
	MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
	MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
	MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
	MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9,
	MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11,
	MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
	MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
	MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
};

static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
static unsigned num_msrs_to_save;

static const u32 emulated_msrs_all[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
	HV_X64_MSR_RESET,
	HV_X64_MSR_VP_INDEX,
	HV_X64_MSR_VP_RUNTIME,
	HV_X64_MSR_SCONTROL,
	HV_X64_MSR_STIMER0_CONFIG,
	HV_X64_MSR_VP_ASSIST_PAGE,
	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
	HV_X64_MSR_TSC_EMULATION_STATUS,

	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,

	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_ARCH_CAPABILITIES,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
	MSR_IA32_MCG_EXT_CTL,
	MSR_IA32_SMBASE,
	MSR_SMI_COUNT,
	MSR_PLATFORM_INFO,
	MSR_MISC_FEATURES_ENABLES,
	MSR_AMD64_VIRT_SPEC_CTRL,
	MSR_IA32_POWER_CTL,
	MSR_IA32_UCODE_REV,

	/*
	 * The following list leaves out MSRs whose values are determined
	 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
	 * We always support the "true" VMX control MSRs, even if the host
	 * processor does not, so I am putting these registers here rather
	 * than in msrs_to_save_all.
	 */
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

	MSR_K7_HWCR,
	MSR_KVM_POLL_CONTROL,
};

static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;

/*
 * List of msr numbers which are used to expose MSR-based features that
 * can be used by a hypervisor to validate requested CPU features.
 */
static const u32 msr_based_features_all[] = {
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR0_FIXED1,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_CR4_FIXED1,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

	MSR_F10H_DECFG,
	MSR_IA32_UCODE_REV,
	MSR_IA32_ARCH_CAPABILITIES,
};

static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
static unsigned int num_msr_based_features;

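/*
 * Compute the ARCH_CAPABILITIES value exposed to the guest: start from
 * the host's MSR (when present) and add or clear bits to reflect what
 * KVM itself mitigates or emulates.
 */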
static u64 kvm_get_arch_capabilities(void)
{
	u64 data = 0;

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);

	/*
	 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
	 * the nested hypervisor runs with NX huge pages.  If it is not,
	 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
	 * L1 guests, so it need not worry about its own (L2) guests.
	 */
	data |= ARCH_CAP_PSCHANGE_MC_NO;

	/*
	 * If we're doing cache flushes (either "always" or "cond")
	 * we will do one whenever the guest does a vmlaunch/vmresume.
	 * If an outer hypervisor is doing the cache flush for us
	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
	 * capability to the guest too, and if EPT is disabled we're not
	 * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
	 * require a nested hypervisor to do a flush of its own.
	 */
	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;

	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		data |= ARCH_CAP_RDCL_NO;
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		data |= ARCH_CAP_SSB_NO;
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		data |= ARCH_CAP_MDS_NO;

	/*
	 * On TAA-affected systems:
	 *   - nothing to do if TSX is disabled on the host.
	 *   - we emulate TSX_CTRL if present on the host.
	 *     This lets the guest use VERW to clear CPU buffers.
	 */
	if (!boot_cpu_has(X86_FEATURE_RTM))
		data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
	else if (!boot_cpu_has_bug(X86_BUG_TAA))
		data |= ARCH_CAP_TAA_NO;

	return data;
}

static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
	case MSR_IA32_ARCH_CAPABILITIES:
		msr->data = kvm_get_arch_capabilities();
		break;
	case MSR_IA32_UCODE_REV:
		rdmsrl_safe(msr->index, &msr->data);
		break;
	default:
		if (kvm_x86_ops->get_msr_feature(msr))
			return 1;
	}
	return 0;
}

static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct kvm_msr_entry msr;
	int r;

	msr.index = index;
	r = kvm_get_msr_feature(&msr);
	if (r)
		return r;

	*data = msr.data;

	return 0;
}

static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
		return false;

	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return false;

	if (efer & (EFER_LME | EFER_LMA) &&
	    !guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return false;

	if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
		return false;

	return true;
}

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 old_efer = vcpu->arch.efer;
	u64 efer = msr_info->data;

	if (efer & efer_reserved_bits)
		return 1;

	if (!msr_info->host_initiated) {
		if (!__kvm_valid_efer(vcpu, efer))
			return 1;

		if (is_paging(vcpu) &&
		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Write @data into the MSR specified by @index.  Select MSR-specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
			 bool host_initiated)
{
	struct msr_data msr;

	switch (index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
		if (is_noncanonical_address(data, vcpu))
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
		/*
		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
		 * non-canonical address is written on Intel but not on
		 * AMD (which ignores the top 32-bits, because it does
		 * not implement 64-bit SYSENTER).
		 *
		 * 64-bit code should hence be able to write a non-canonical
		 * value on AMD.  Making the address canonical ensures that
		 * vmentry does not fail on Intel after writing a non-canonical
		 * value, and that something deterministic happens if the guest
		 * invokes 64-bit SYSENTER.
		 */
		data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
	}

	msr.data = data;
	msr.index = index;
	msr.host_initiated = host_initiated;

	return kvm_x86_ops->set_msr(vcpu, &msr);
}

/*
 * Read the MSR specified by @index into @data.  Select MSR-specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
		  bool host_initiated)
{
	struct msr_data msr;
	int ret;

	msr.index = index;
	msr.host_initiated = host_initiated;

	ret = kvm_x86_ops->get_msr(vcpu, &msr);
	if (!ret)
		*data = msr.data;
	return ret;
}

int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	return __kvm_get_msr(vcpu, index, data, false);
}
EXPORT_SYMBOL_GPL(kvm_get_msr);

int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
	return __kvm_set_msr(vcpu, index, data, false);
}
EXPORT_SYMBOL_GPL(kvm_set_msr);

int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data;

	if (kvm_get_msr(vcpu, ecx, &data)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	trace_kvm_msr_read(ecx, data);

	kvm_rax_write(vcpu, data & -1u);
	kvm_rdx_write(vcpu, (data >> 32) & -1u);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);

int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data = kvm_read_edx_eax(vcpu);

	if (kvm_set_msr(vcpu, ecx, data)) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	trace_kvm_msr_write(ecx, data);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);

/*
 * The fast path for frequent and performance-sensitive wrmsr emulation,
 * i.e. the sending of IPIs.  Handling the IPI early in the VM-Exit flow
 * reduces the latency of virtual IPIs by avoiding the expensive bits of
 * transitioning from guest to host, e.g. reacquiring KVM's SRCU lock.
 * This is in contrast to the other cases, which must be handled after
 * interrupts are enabled on the host.
 */
static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
{
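	/*
	 * Only fixed-delivery, physical-destination IPIs sent through
	 * the in-kernel x2APIC take the fast path; anything else falls
	 * back to the full exit handler.
	 */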
	if (lapic_in_kernel(vcpu) && apic_x2apic_mode(vcpu->arch.apic) &&
	    ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
	    ((data & APIC_MODE_MASK) == APIC_DM_FIXED)) {

		kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32));
		return kvm_lapic_reg_write(vcpu->arch.apic, APIC_ICR, (u32)data);
	}

	return 1;
}

enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{
	u32 msr = kvm_rcx_read(vcpu);
	u64 data = kvm_read_edx_eax(vcpu);
	int ret = 0;

	switch (msr) {
	case APIC_BASE_MSR + (APIC_ICR >> 4):
		ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
		break;
	default:
		return EXIT_FASTPATH_NONE;
	}

	if (!ret) {
		trace_kvm_msr_write(msr, data);
		return EXIT_FASTPATH_SKIP_EMUL_INS;
	}

	return EXIT_FASTPATH_NONE;
}
EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);

/*
 * Adapt __kvm_get_msr() and __kvm_set_msr() to msr_io()'s calling
 * convention.
 */
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return __kvm_get_msr(vcpu, index, data, true);
}

static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return __kvm_set_msr(vcpu, index, *data, true);
}

#ifdef CONFIG_X86_64
struct pvclock_clock {
	int vclock_mode;
	u64 cycle_last;
	u64 mask;
	u32 mult;
	u32 shift;
};

struct pvclock_gtod_data {
	seqcount_t seq;

	struct pvclock_clock clock; /* extract of a clocksource struct */
	struct pvclock_clock raw_clock; /* extract of a clocksource struct */

	u64 boot_ns_raw;
	u64 boot_ns;
	u64 nsec_base;
	u64 wall_time_sec;
	u64 monotonic_raw_nsec;
};

static struct pvclock_gtod_data pvclock_gtod_data;

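/*
 * Snapshot the timekeeper's monotonic and raw clock parameters into
 * pvclock_gtod_data. The writer publishes under the seqcount so that
 * readers can compute guest time locklessly.
 */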
static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
	u64 boot_ns, boot_ns_raw;

	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
	boot_ns_raw = ktime_to_ns(ktime_add(tk->tkr_raw.base, tk->offs_boot));

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
	vdata->clock.mask		= tk->tkr_mono.mask;
	vdata->clock.mult		= tk->tkr_mono.mult;
	vdata->clock.shift		= tk->tkr_mono.shift;

	vdata->raw_clock.vclock_mode	= tk->tkr_raw.clock->archdata.vclock_mode;
	vdata->raw_clock.cycle_last	= tk->tkr_raw.cycle_last;
	vdata->raw_clock.mask		= tk->tkr_raw.mask;
	vdata->raw_clock.mult		= tk->tkr_raw.mult;
	vdata->raw_clock.shift		= tk->tkr_raw.shift;

	vdata->boot_ns			= boot_ns;
	vdata->nsec_base		= tk->tkr_mono.xtime_nsec;

	vdata->wall_time_sec		= tk->xtime_sec;

	vdata->boot_ns_raw		= boot_ns_raw;
	vdata->monotonic_raw_nsec	= tk->tkr_raw.xtime_nsec;

	write_seqcount_end(&vdata->seq);
}
#endif

void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

18068523
GOC
1670static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1671{
9ed3c444
AK
1672 int version;
1673 int r;
50d0a0f9 1674 struct pvclock_wall_clock wc;
87aeb54f 1675 struct timespec64 boot;
18068523
GOC
1676
1677 if (!wall_clock)
1678 return;
1679
9ed3c444
AK
1680 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1681 if (r)
1682 return;
1683
1684 if (version & 1)
1685 ++version; /* first time write, random junk */
1686
1687 ++version;
18068523 1688
1dab1345
NK
1689 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
1690 return;
18068523 1691
50d0a0f9
GH
1692 /*
1693 * The guest calculates current wall clock time by adding
34c238a1 1694 * system time (updated by kvm_guest_time_update below) to the
50d0a0f9
GH
1695 * wall clock specified here.  Guest system time equals host
1696 * system time for us, thus we must fill in host boot time here.
1697 */
87aeb54f 1698 getboottime64(&boot);
50d0a0f9 1699
4b648665 1700 if (kvm->arch.kvmclock_offset) {
87aeb54f
AB
1701 struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
1702 boot = timespec64_sub(boot, ts);
4b648665 1703 }
87aeb54f 1704 wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
50d0a0f9
GH
1705 wc.nsec = boot.tv_nsec;
1706 wc.version = version;
18068523
GOC
1707
1708 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
1709
1710 version++;
1711 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
18068523
GOC
1712}
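/*
 * Illustrative sketch, not part of this file: how a guest combines the
 * pvclock_wall_clock published above with its kvmclock system time to get
 * wall-clock nanoseconds. example_guest_wall_ns() is a hypothetical helper.
 */
static inline u64 example_guest_wall_ns(const struct pvclock_wall_clock *wc,
					u64 system_time_ns)
{
	/* wall time = boot-time base written by the host + time since boot */
	return (u64)wc->sec * NSEC_PER_SEC + wc->nsec + system_time_ns;
}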
1713
50d0a0f9
GH
1714static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
1715{
b51012de
PB
1716 do_shl32_div32(dividend, divisor);
1717 return dividend;
50d0a0f9
GH
1718}
1719
3ae13faa 1720static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
5f4e3f88 1721 s8 *pshift, u32 *pmultiplier)
50d0a0f9 1722{
5f4e3f88 1723 uint64_t scaled64;
50d0a0f9
GH
1724 int32_t shift = 0;
1725 uint64_t tps64;
1726 uint32_t tps32;
1727
3ae13faa
PB
1728 tps64 = base_hz;
1729 scaled64 = scaled_hz;
50933623 1730 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
50d0a0f9
GH
1731 tps64 >>= 1;
1732 shift--;
1733 }
1734
1735 tps32 = (uint32_t)tps64;
50933623
JK
1736 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
1737 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
5f4e3f88
ZA
1738 scaled64 >>= 1;
1739 else
1740 tps32 <<= 1;
50d0a0f9
GH
1741 shift++;
1742 }
1743
5f4e3f88
ZA
1744 *pshift = shift;
1745 *pmultiplier = div_frac(scaled64, tps32);
50d0a0f9
GH
1746}
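/*
 * Illustrative sketch, not part of this file: applying the (shift, mult)
 * pair computed by kvm_get_time_scale(), mirroring pvclock_scale_delta().
 * The pair is chosen so that scaled ~= base * 2^shift * mult / 2^32; the
 * __int128 intermediate stands in for mul_u64_u32_shr().
 */
static inline u64 example_scale_delta(u64 delta, u32 mult, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	return (u64)(((unsigned __int128)delta * mult) >> 32);
}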
1747
d828199e 1748#ifdef CONFIG_X86_64
16e8d74d 1749static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
d828199e 1750#endif
16e8d74d 1751
c8076604 1752static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
69b0049a 1753static unsigned long max_tsc_khz;
c8076604 1754
cc578287 1755static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1e993611 1756{
cc578287
ZA
1757 u64 v = (u64)khz * (1000000 + ppm);
1758 do_div(v, 1000000);
1759 return v;
1e993611
JR
1760}
1761
381d585c
HZ
1762static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
1763{
1764 u64 ratio;
1765
1766 /* Guest TSC same frequency as host TSC? */
1767 if (!scale) {
1768 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1769 return 0;
1770 }
1771
1772 /* TSC scaling supported? */
1773 if (!kvm_has_tsc_control) {
1774 if (user_tsc_khz > tsc_khz) {
1775 vcpu->arch.tsc_catchup = 1;
1776 vcpu->arch.tsc_always_catchup = 1;
1777 return 0;
1778 } else {
3f16a5c3 1779 pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
381d585c
HZ
1780 return -1;
1781 }
1782 }
1783
1784 /* TSC scaling required - calculate ratio */
1785 ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
1786 user_tsc_khz, tsc_khz);
1787
1788 if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
3f16a5c3
PB
1789 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
1790 user_tsc_khz);
381d585c
HZ
1791 return -1;
1792 }
1793
1794 vcpu->arch.tsc_scaling_ratio = ratio;
1795 return 0;
1796}
1797
4941b8cb 1798static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
759379dd 1799{
cc578287
ZA
1800 u32 thresh_lo, thresh_hi;
1801 int use_scaling = 0;
217fc9cf 1802
03ba32ca 1803 /* tsc_khz can be zero if TSC calibration fails */
4941b8cb 1804 if (user_tsc_khz == 0) {
ad721883
HZ
1805 /* set tsc_scaling_ratio to a safe value */
1806 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
381d585c 1807 return -1;
ad721883 1808 }
03ba32ca 1809
c285545f 1810 /* Compute a scale to convert nanoseconds in TSC cycles */
3ae13faa 1811 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
cc578287
ZA
1812 &vcpu->arch.virtual_tsc_shift,
1813 &vcpu->arch.virtual_tsc_mult);
4941b8cb 1814 vcpu->arch.virtual_tsc_khz = user_tsc_khz;
cc578287
ZA
1815
1816 /*
1817 * Compute the variation in TSC rate which is acceptable
1818 * within the range of tolerance and decide if the
1819 * rate being applied is within those bounds of the hardware
1820 * rate. If so, no scaling or compensation need be done.
1821 */
1822 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1823 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
4941b8cb
PB
1824 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
1825 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
cc578287
ZA
1826 use_scaling = 1;
1827 }
4941b8cb 1828 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
c285545f
ZA
1829}
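/*
 * Illustrative example, not part of this file: with a 2 GHz host TSC and
 * the 250 ppm tolerance of tsc_tolerance_ppm, adjust_tsc_khz() brackets
 * the acceptable guest rate as [1999500, 2000500] kHz; rates outside that
 * window fall back to scaling or catchup. Hypothetical helper.
 */
static inline bool example_needs_scaling(u32 user_khz, u32 host_khz, s32 ppm)
{
	u64 lo = (u64)host_khz * (1000000 - ppm) / 1000000;
	u64 hi = (u64)host_khz * (1000000 + ppm) / 1000000;

	return user_khz < lo || user_khz > hi;
}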
1830
1831static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1832{
e26101b1 1833 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
cc578287
ZA
1834 vcpu->arch.virtual_tsc_mult,
1835 vcpu->arch.virtual_tsc_shift);
e26101b1 1836 tsc += vcpu->arch.this_tsc_write;
c285545f
ZA
1837 return tsc;
1838}
1839
b0c39dc6
VK
1840static inline int gtod_is_based_on_tsc(int mode)
1841{
1842 return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK;
1843}
1844
69b0049a 1845static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
b48aa97e
MT
1846{
1847#ifdef CONFIG_X86_64
1848 bool vcpus_matched;
b48aa97e
MT
1849 struct kvm_arch *ka = &vcpu->kvm->arch;
1850 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1851
1852 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1853 atomic_read(&vcpu->kvm->online_vcpus));
1854
7f187922
MT
1855 /*
1856 * Once the masterclock is enabled, always perform the update
1857 * request so that it stays current.
1858 *
1859 * To enable the masterclock, the host clocksource must be TSC
1860 * and the vcpus need to have matched TSCs.  When that happens,
1861 * perform the request to enable the masterclock.
1862 */
1863 if (ka->use_master_clock ||
b0c39dc6 1864 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
b48aa97e
MT
1865 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1866
1867 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
1868 atomic_read(&vcpu->kvm->online_vcpus),
1869 ka->use_master_clock, gtod->clock.vclock_mode);
1870#endif
1871}
1872
ba904635
WA
1873static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
1874{
e79f245d 1875 u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
ba904635
WA
1876 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
1877}
1878
35181e86
HZ
1879/*
1880 * Multiply tsc by a fixed point number represented by ratio.
1881 *
1882 * The most significant 64-N bits (mult) of ratio represent the
1883 * integral part of the fixed point number; the remaining N bits
1884 * (frac) represent the fractional part, ie. ratio represents a fixed
1885 * point number (mult + frac * 2^(-N)).
1886 *
1887 * N equals to kvm_tsc_scaling_ratio_frac_bits.
1888 */
1889static inline u64 __scale_tsc(u64 ratio, u64 tsc)
1890{
1891 return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
1892}
1893
1894u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
1895{
1896 u64 _tsc = tsc;
1897 u64 ratio = vcpu->arch.tsc_scaling_ratio;
1898
1899 if (ratio != kvm_default_tsc_scaling_ratio)
1900 _tsc = __scale_tsc(ratio, tsc);
1901
1902 return _tsc;
1903}
1904EXPORT_SYMBOL_GPL(kvm_scale_tsc);
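/*
 * Illustrative sketch, not part of this file: the fixed-point multiply
 * behind kvm_scale_tsc(), with __int128 standing in for mul_u64_u64_shr().
 * With N = 48 fractional bits (the VMX value), a guest running at half the
 * host rate uses ratio = 1ULL << 47.
 */
static inline u64 example_scale_tsc(u64 tsc, u64 ratio, unsigned int frac_bits)
{
	return (u64)(((unsigned __int128)tsc * ratio) >> frac_bits);
}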
1905
07c1419a
HZ
1906static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1907{
1908 u64 tsc;
1909
1910 tsc = kvm_scale_tsc(vcpu, rdtsc());
1911
1912 return target_tsc - tsc;
1913}
1914
4ba76538
HZ
1915u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
1916{
e79f245d
KA
1917 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
1918
1919 return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
4ba76538
HZ
1920}
1921EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1922
a545ab6a
LC
1923static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1924{
326e7425 1925 vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
a545ab6a
LC
1926}
1927
b0c39dc6
VK
1928static inline bool kvm_check_tsc_unstable(void)
1929{
1930#ifdef CONFIG_X86_64
1931 /*
1932 * TSC is marked unstable when we're running on Hyper-V, but the
1933 * 'TSC page' clocksource is still good in that case.
1934 */
1935 if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK)
1936 return false;
1937#endif
1938 return check_tsc_unstable();
1939}
1940
8fe8ab46 1941void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
99e3e30a
ZA
1942{
1943 struct kvm *kvm = vcpu->kvm;
f38e098f 1944 u64 offset, ns, elapsed;
99e3e30a 1945 unsigned long flags;
b48aa97e 1946 bool matched;
0d3da0d2 1947 bool already_matched;
8fe8ab46 1948 u64 data = msr->data;
c5e8ec8e 1949 bool synchronizing = false;
99e3e30a 1950
038f8c11 1951 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
07c1419a 1952 offset = kvm_compute_tsc_offset(vcpu, data);
9285ec4c 1953 ns = ktime_get_boottime_ns();
f38e098f 1954 elapsed = ns - kvm->arch.last_tsc_nsec;
5d3cb0f6 1955
03ba32ca 1956 if (vcpu->arch.virtual_tsc_khz) {
bd8fab39
DP
1957 if (data == 0 && msr->host_initiated) {
1958 /*
1959 * detection of vcpu initialization -- need to sync
1960 * with other vCPUs. This particularly helps to keep
1961 * kvm_clock stable after CPU hotplug
1962 */
1963 synchronizing = true;
1964 } else {
1965 u64 tsc_exp = kvm->arch.last_tsc_write +
1966 nsec_to_cycles(vcpu, elapsed);
1967 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
1968 /*
1969 * Special case: TSC write with a small delta (1 second)
1970 * of virtual cycle time against real time is
1971 * interpreted as an attempt to synchronize the CPU.
1972 */
1973 synchronizing = data < tsc_exp + tsc_hz &&
1974 data + tsc_hz > tsc_exp;
1975 }
c5e8ec8e 1976 }
f38e098f
ZA
1977
1978 /*
5d3cb0f6
ZA
1979 * For a reliable TSC, we can match TSC offsets, and for an unstable
1980 * TSC, we add elapsed time in this computation. We could let the
1981 * compensation code attempt to catch up if we fall behind, but
1982 * it's better to try to match offsets from the beginning.
1983 */
c5e8ec8e 1984 if (synchronizing &&
5d3cb0f6 1985 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
b0c39dc6 1986 if (!kvm_check_tsc_unstable()) {
e26101b1 1987 offset = kvm->arch.cur_tsc_offset;
f38e098f 1988 } else {
857e4099 1989 u64 delta = nsec_to_cycles(vcpu, elapsed);
5d3cb0f6 1990 data += delta;
07c1419a 1991 offset = kvm_compute_tsc_offset(vcpu, data);
f38e098f 1992 }
b48aa97e 1993 matched = true;
0d3da0d2 1994 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
e26101b1
ZA
1995 } else {
1996 /*
1997 * We split periods of matched TSC writes into generations.
1998 * For each generation, we track the original measured
1999 * nanosecond time, offset, and write, so if TSCs are in
2000 * sync, we can match exact offset, and if not, we can match
4a969980 2001 * exact software computation in compute_guest_tsc()
e26101b1
ZA
2002 *
2003 * These values are tracked in kvm->arch.cur_xxx variables.
2004 */
2005 kvm->arch.cur_tsc_generation++;
2006 kvm->arch.cur_tsc_nsec = ns;
2007 kvm->arch.cur_tsc_write = data;
2008 kvm->arch.cur_tsc_offset = offset;
b48aa97e 2009 matched = false;
f38e098f 2010 }
e26101b1
ZA
2011
2012 /*
2013 * We also track the most recent recorded KHZ, write and time to
2014 * allow the matching interval to be extended at each write.
2015 */
f38e098f
ZA
2016 kvm->arch.last_tsc_nsec = ns;
2017 kvm->arch.last_tsc_write = data;
5d3cb0f6 2018 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
99e3e30a 2019
b183aa58 2020 vcpu->arch.last_guest_tsc = data;
e26101b1
ZA
2021
2022 /* Keep track of which generation this VCPU has synchronized to */
2023 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2024 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2025 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2026
d6321d49 2027 if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
ba904635 2028 update_ia32_tsc_adjust_msr(vcpu, offset);
d6321d49 2029
a545ab6a 2030 kvm_vcpu_write_tsc_offset(vcpu, offset);
e26101b1 2031 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
b48aa97e
MT
2032
2033 spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
0d3da0d2 2034 if (!matched) {
b48aa97e 2035 kvm->arch.nr_vcpus_matched_tsc = 0;
0d3da0d2
TG
2036 } else if (!already_matched) {
2037 kvm->arch.nr_vcpus_matched_tsc++;
2038 }
b48aa97e
MT
2039
2040 kvm_track_tsc_matching(vcpu);
2041 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
99e3e30a 2042}
e26101b1 2043
99e3e30a
ZA
2044EXPORT_SYMBOL_GPL(kvm_write_tsc);
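/*
 * Illustrative sketch, not part of this file: the one-second window used
 * above to classify a guest TSC write as a synchronization attempt. For a
 * 2 GHz virtual TSC, writes landing within +/- 2e9 cycles of the value
 * predicted from elapsed time count as "synchronizing". Hypothetical helper.
 */
static inline bool example_is_sync_write(u64 data, u64 tsc_exp, u64 tsc_hz)
{
	return data < tsc_exp + tsc_hz && data + tsc_hz > tsc_exp;
}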
2045
58ea6767
HZ
2046static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2047 s64 adjustment)
2048{
326e7425
LS
2049 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
2050 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
58ea6767
HZ
2051}
2052
2053static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2054{
2055 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
2056 WARN_ON(adjustment < 0);
2057 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
ea26e4ec 2058 adjust_tsc_offset_guest(vcpu, adjustment);
58ea6767
HZ
2059}
2060
d828199e
MT
2061#ifdef CONFIG_X86_64
2062
a5a1d1c2 2063static u64 read_tsc(void)
d828199e 2064{
a5a1d1c2 2065 u64 ret = (u64)rdtsc_ordered();
03b9730b 2066 u64 last = pvclock_gtod_data.clock.cycle_last;
d828199e
MT
2067
2068 if (likely(ret >= last))
2069 return ret;
2070
2071 /*
2072 * GCC likes to generate cmov here, but this branch is extremely
6a6256f9 2073 * predictable (it's just a function of time and the likely is
d828199e
MT
2074 * very likely) and there's a data dependence, so force GCC
2075 * to generate a branch instead. I don't barrier() because
2076 * we don't actually need a barrier, and if this function
2077 * ever gets inlined it will generate worse code.
2078 */
2079 asm volatile ("");
2080 return last;
2081}
2082
53fafdbb
MT
2083static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
2084 int *mode)
d828199e
MT
2085{
2086 long v;
b0c39dc6
VK
2087 u64 tsc_pg_val;
2088
53fafdbb 2089 switch (clock->vclock_mode) {
b0c39dc6
VK
2090 case VCLOCK_HVCLOCK:
2091 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
2092 tsc_timestamp);
2093 if (tsc_pg_val != U64_MAX) {
2094 /* TSC page valid */
2095 *mode = VCLOCK_HVCLOCK;
53fafdbb
MT
2096 v = (tsc_pg_val - clock->cycle_last) &
2097 clock->mask;
b0c39dc6
VK
2098 } else {
2099 /* TSC page invalid */
2100 *mode = VCLOCK_NONE;
2101 }
2102 break;
2103 case VCLOCK_TSC:
2104 *mode = VCLOCK_TSC;
2105 *tsc_timestamp = read_tsc();
53fafdbb
MT
2106 v = (*tsc_timestamp - clock->cycle_last) &
2107 clock->mask;
b0c39dc6
VK
2108 break;
2109 default:
2110 *mode = VCLOCK_NONE;
2111 }
d828199e 2112
b0c39dc6
VK
2113 if (*mode == VCLOCK_NONE)
2114 *tsc_timestamp = v = 0;
d828199e 2115
53fafdbb 2116 return v * clock->mult;
d828199e
MT
2117}
2118
53fafdbb 2119static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
d828199e 2120{
cbcf2dd3 2121 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
d828199e 2122 unsigned long seq;
d828199e 2123 int mode;
cbcf2dd3 2124 u64 ns;
d828199e 2125
d828199e
MT
2126 do {
2127 seq = read_seqcount_begin(&gtod->seq);
53fafdbb
MT
2128 ns = gtod->monotonic_raw_nsec;
2129 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
d828199e 2130 ns >>= gtod->clock.shift;
53fafdbb 2131 ns += gtod->boot_ns_raw;
d828199e 2132 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
cbcf2dd3 2133 *t = ns;
d828199e
MT
2134
2135 return mode;
2136}
2137
899a31f5 2138static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
55dd00a7
MT
2139{
2140 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2141 unsigned long seq;
2142 int mode;
2143 u64 ns;
2144
2145 do {
2146 seq = read_seqcount_begin(&gtod->seq);
55dd00a7
MT
2147 ts->tv_sec = gtod->wall_time_sec;
2148 ns = gtod->nsec_base;
53fafdbb 2149 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
55dd00a7
MT
2150 ns >>= gtod->clock.shift;
2151 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2152
2153 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
2154 ts->tv_nsec = ns;
2155
2156 return mode;
2157}
2158
b0c39dc6
VK
2159/* returns true if host is using TSC based clocksource */
2160static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
d828199e 2161{
d828199e 2162 /* checked again under seqlock below */
b0c39dc6 2163 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
d828199e
MT
2164 return false;
2165
53fafdbb 2166 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
b0c39dc6 2167 tsc_timestamp));
d828199e 2168}
55dd00a7 2169
b0c39dc6 2170/* returns true if host is using TSC based clocksource */
899a31f5 2171static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
b0c39dc6 2172 u64 *tsc_timestamp)
55dd00a7
MT
2173{
2174 /* checked again under seqlock below */
b0c39dc6 2175 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
55dd00a7
MT
2176 return false;
2177
b0c39dc6 2178 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
55dd00a7 2179}
d828199e
MT
2180#endif
2181
2182/*
2183 *
b48aa97e
MT
2184 * Assuming a stable TSC across physical CPUs, and a stable TSC
2185 * across virtual CPUs, the following condition is possible.
2186 * Each numbered line represents an event visible to both
d828199e
MT
2187 * CPUs at the next numbered event.
2188 *
2189 * "timespecX" represents host monotonic time. "tscX" represents
2190 * RDTSC value.
2191 *
2192 * VCPU0 on CPU0 | VCPU1 on CPU1
2193 *
2194 * 1. read timespec0,tsc0
2195 * 2. | timespec1 = timespec0 + N
2196 * | tsc1 = tsc0 + M
2197 * 3. transition to guest | transition to guest
2198 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2199 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
2200 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2201 *
2202 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
2203 *
2204 * - ret0 < ret1
2205 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2206 * ...
2207 * - 0 < N - M => M < N
2208 *
2209 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
2210 * always the case (the difference between two distinct xtime instances
2211 * might be smaller than the difference between corresponding TSC reads,
2212 * when updating guest vcpus' pvclock areas).
2213 *
2214 * To avoid that problem, do not allow visibility of distinct
2215 * system_timestamp/tsc_timestamp values simultaneously: use a master
2216 * copy of host monotonic time values. Update that master copy
2217 * in lockstep.
2218 *
b48aa97e 2219 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
d828199e
MT
2220 *
2221 */
2222
2223static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
2224{
2225#ifdef CONFIG_X86_64
2226 struct kvm_arch *ka = &kvm->arch;
2227 int vclock_mode;
b48aa97e
MT
2228 bool host_tsc_clocksource, vcpus_matched;
2229
2230 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2231 atomic_read(&kvm->online_vcpus));
d828199e
MT
2232
2233 /*
2234 * If the host uses TSC clock, then passthrough TSC as stable
2235 * to the guest.
2236 */
b48aa97e 2237 host_tsc_clocksource = kvm_get_time_and_clockread(
d828199e
MT
2238 &ka->master_kernel_ns,
2239 &ka->master_cycle_now);
2240
16a96021 2241 ka->use_master_clock = host_tsc_clocksource && vcpus_matched
a826faf1 2242 && !ka->backwards_tsc_observed
54750f2c 2243 && !ka->boot_vcpu_runs_old_kvmclock;
b48aa97e 2244
d828199e
MT
2245 if (ka->use_master_clock)
2246 atomic_set(&kvm_guest_has_master_clock, 1);
2247
2248 vclock_mode = pvclock_gtod_data.clock.vclock_mode;
b48aa97e
MT
2249 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
2250 vcpus_matched);
d828199e
MT
2251#endif
2252}
2253
2860c4b1
PB
2254void kvm_make_mclock_inprogress_request(struct kvm *kvm)
2255{
2256 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
2257}
2258
2e762ff7
MT
2259static void kvm_gen_update_masterclock(struct kvm *kvm)
2260{
2261#ifdef CONFIG_X86_64
2262 int i;
2263 struct kvm_vcpu *vcpu;
2264 struct kvm_arch *ka = &kvm->arch;
2265
2266 spin_lock(&ka->pvclock_gtod_sync_lock);
2267 kvm_make_mclock_inprogress_request(kvm);
2268 /* no guest entries from this point */
2269 pvclock_update_vm_gtod_copy(kvm);
2270
2271 kvm_for_each_vcpu(i, vcpu, kvm)
105b21bb 2272 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2e762ff7
MT
2273
2274 /* guest entries allowed */
2275 kvm_for_each_vcpu(i, vcpu, kvm)
72875d8a 2276 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
2e762ff7
MT
2277
2278 spin_unlock(&ka->pvclock_gtod_sync_lock);
2279#endif
2280}
2281
e891a32e 2282u64 get_kvmclock_ns(struct kvm *kvm)
108b249c 2283{
108b249c 2284 struct kvm_arch *ka = &kvm->arch;
8b953440 2285 struct pvclock_vcpu_time_info hv_clock;
e2c2206a 2286 u64 ret;
108b249c 2287
8b953440
PB
2288 spin_lock(&ka->pvclock_gtod_sync_lock);
2289 if (!ka->use_master_clock) {
2290 spin_unlock(&ka->pvclock_gtod_sync_lock);
9285ec4c 2291 return ktime_get_boottime_ns() + ka->kvmclock_offset;
108b249c
PB
2292 }
2293
8b953440
PB
2294 hv_clock.tsc_timestamp = ka->master_cycle_now;
2295 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
2296 spin_unlock(&ka->pvclock_gtod_sync_lock);
2297
e2c2206a
WL
2298 /* both __this_cpu_read() and rdtsc() should be on the same cpu */
2299 get_cpu();
2300
e70b57a6
WL
2301 if (__this_cpu_read(cpu_tsc_khz)) {
2302 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
2303 &hv_clock.tsc_shift,
2304 &hv_clock.tsc_to_system_mul);
2305 ret = __pvclock_read_cycles(&hv_clock, rdtsc());
2306 } else
9285ec4c 2307 ret = ktime_get_boottime_ns() + ka->kvmclock_offset;
e2c2206a
WL
2308
2309 put_cpu();
2310
2311 return ret;
108b249c
PB
2312}
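/*
 * Illustrative sketch, not part of this file: the arithmetic inside
 * __pvclock_read_cycles() as used above -- nanoseconds are the snapshot's
 * system_time plus the scaled TSC delta since tsc_timestamp, reusing the
 * example_scale_delta() sketch from earlier.
 */
static inline u64 example_pvclock_read(const struct pvclock_vcpu_time_info *hv,
				       u64 host_tsc)
{
	return hv->system_time +
	       example_scale_delta(host_tsc - hv->tsc_timestamp,
				   hv->tsc_to_system_mul, hv->tsc_shift);
}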
2313
0d6dd2ff
PB
2314static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
2315{
2316 struct kvm_vcpu_arch *vcpu = &v->arch;
2317 struct pvclock_vcpu_time_info guest_hv_clock;
2318
4e335d9e 2319 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
0d6dd2ff
PB
2320 &guest_hv_clock, sizeof(guest_hv_clock))))
2321 return;
2322
2323 /* This VCPU is paused, but it's legal for a guest to read another
2324 * VCPU's kvmclock, so we really have to follow the specification where
2325 * it says that version is odd if data is being modified, and even after
2326 * it is consistent.
2327 *
2328 * Version field updates must be kept separate. This is because
2329 * kvm_write_guest_cached might use a "rep movs" instruction, and
2330 * writes within a string instruction are weakly ordered. So there
2331 * are three writes overall.
2332 *
2333 * As a small optimization, only write the version field in the first
2334 * and third write. The vcpu->pv_time cache is still valid, because the
2335 * version field is the first in the struct.
2336 */
2337 BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
2338
51c4b8bb
LA
2339 if (guest_hv_clock.version & 1)
2340 ++guest_hv_clock.version; /* first time write, random junk */
2341
0d6dd2ff 2342 vcpu->hv_clock.version = guest_hv_clock.version + 1;
4e335d9e
PB
2343 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
2344 &vcpu->hv_clock,
2345 sizeof(vcpu->hv_clock.version));
0d6dd2ff
PB
2346
2347 smp_wmb();
2348
2349 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
2350 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
2351
2352 if (vcpu->pvclock_set_guest_stopped_request) {
2353 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
2354 vcpu->pvclock_set_guest_stopped_request = false;
2355 }
2356
2357 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
2358
4e335d9e
PB
2359 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
2360 &vcpu->hv_clock,
2361 sizeof(vcpu->hv_clock));
0d6dd2ff
PB
2362
2363 smp_wmb();
2364
2365 vcpu->hv_clock.version++;
4e335d9e
PB
2366 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
2367 &vcpu->hv_clock,
2368 sizeof(vcpu->hv_clock.version));
0d6dd2ff
PB
2369}
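/*
 * Illustrative sketch, not part of this file: the guest-side counterpart of
 * the versioned publication above. A reader retries while the version is
 * odd (update in progress) or changed mid-read; the barriers pair with the
 * smp_wmb() calls in kvm_setup_pvclock_page(). Hypothetical helper.
 */
static inline u64 example_guest_read_pvclock(const volatile struct pvclock_vcpu_time_info *hv,
					     u64 guest_tsc)
{
	u32 version;
	u64 ns;

	do {
		version = hv->version;
		smp_rmb();
		ns = hv->system_time +
		     example_scale_delta(guest_tsc - hv->tsc_timestamp,
					 hv->tsc_to_system_mul, hv->tsc_shift);
		smp_rmb();
	} while ((version & 1) || version != hv->version);

	return ns;
}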
2370
34c238a1 2371static int kvm_guest_time_update(struct kvm_vcpu *v)
18068523 2372{
78db6a50 2373 unsigned long flags, tgt_tsc_khz;
18068523 2374 struct kvm_vcpu_arch *vcpu = &v->arch;
d828199e 2375 struct kvm_arch *ka = &v->kvm->arch;
f25e656d 2376 s64 kernel_ns;
d828199e 2377 u64 tsc_timestamp, host_tsc;
51d59c6b 2378 u8 pvclock_flags;
d828199e
MT
2379 bool use_master_clock;
2380
2381 kernel_ns = 0;
2382 host_tsc = 0;
18068523 2383
d828199e
MT
2384 /*
2385 * If the host uses TSC clock, then passthrough TSC as stable
2386 * to the guest.
2387 */
2388 spin_lock(&ka->pvclock_gtod_sync_lock);
2389 use_master_clock = ka->use_master_clock;
2390 if (use_master_clock) {
2391 host_tsc = ka->master_cycle_now;
2392 kernel_ns = ka->master_kernel_ns;
2393 }
2394 spin_unlock(&ka->pvclock_gtod_sync_lock);
c09664bb
MT
2395
2396 /* Keep irq disabled to prevent changes to the clock */
2397 local_irq_save(flags);
78db6a50
PB
2398 tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
2399 if (unlikely(tgt_tsc_khz == 0)) {
c09664bb
MT
2400 local_irq_restore(flags);
2401 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
2402 return 1;
2403 }
d828199e 2404 if (!use_master_clock) {
4ea1636b 2405 host_tsc = rdtsc();
9285ec4c 2406 kernel_ns = ktime_get_boottime_ns();
d828199e
MT
2407 }
2408
4ba76538 2409 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
d828199e 2410
c285545f
ZA
2411 /*
2412 * We may have to catch up the TSC to match elapsed wall clock
2413 * time for two reasons, even if kvmclock is used.
2414 * 1) CPU could have been running below the maximum TSC rate
2415 * 2) Broken TSC compensation resets the base at each VCPU
2416 * entry to avoid unknown leaps of TSC even when running
2417 * again on the same CPU. This may cause apparent elapsed
2418 * time to disappear, and the guest to stand still or run
2419 * very slowly.
2420 */
2421 if (vcpu->tsc_catchup) {
2422 u64 tsc = compute_guest_tsc(v, kernel_ns);
2423 if (tsc > tsc_timestamp) {
f1e2b260 2424 adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
c285545f
ZA
2425 tsc_timestamp = tsc;
2426 }
50d0a0f9
GH
2427 }
2428
18068523
GOC
2429 local_irq_restore(flags);
2430
0d6dd2ff 2431 /* With all the info we got, fill in the values */
18068523 2432
78db6a50
PB
2433 if (kvm_has_tsc_control)
2434 tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
2435
2436 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
3ae13faa 2437 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
5f4e3f88
ZA
2438 &vcpu->hv_clock.tsc_shift,
2439 &vcpu->hv_clock.tsc_to_system_mul);
78db6a50 2440 vcpu->hw_tsc_khz = tgt_tsc_khz;
8cfdc000
ZA
2441 }
2442
1d5f066e 2443 vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
759379dd 2444 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
28e4639a 2445 vcpu->last_guest_tsc = tsc_timestamp;
51d59c6b 2446
d828199e 2447 /* If the host uses TSC clocksource, then it is stable */
0d6dd2ff 2448 pvclock_flags = 0;
d828199e
MT
2449 if (use_master_clock)
2450 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
2451
78c0337a
MT
2452 vcpu->hv_clock.flags = pvclock_flags;
2453
095cf55d
PB
2454 if (vcpu->pv_time_enabled)
2455 kvm_setup_pvclock_page(v);
2456 if (v == kvm_get_vcpu(v->kvm, 0))
2457 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
8cfdc000 2458 return 0;
c8076604
GH
2459}
2460
0061d53d
MT
2461/*
2462 * kvmclock updates which are isolated to a given vcpu, such as
2463 * vcpu->cpu migration, should not allow system_timestamp from
2464 * the rest of the vcpus to remain static. Otherwise ntp frequency
2465 * correction applies to one vcpu's system_timestamp but not
2466 * the others.
2467 *
2468 * So in those cases, request a kvmclock update for all vcpus.
7e44e449
AJ
2469 * We need to rate-limit these requests though, as they can
2470 * considerably slow guests that have a large number of vcpus.
2471 * The time for a remote vcpu to update its kvmclock is bound
2472 * by the delay we use to rate-limit the updates.
0061d53d
MT
2473 */
2474
7e44e449
AJ
2475#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
2476
2477static void kvmclock_update_fn(struct work_struct *work)
0061d53d
MT
2478{
2479 int i;
7e44e449
AJ
2480 struct delayed_work *dwork = to_delayed_work(work);
2481 struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
2482 kvmclock_update_work);
2483 struct kvm *kvm = container_of(ka, struct kvm, arch);
0061d53d
MT
2484 struct kvm_vcpu *vcpu;
2485
2486 kvm_for_each_vcpu(i, vcpu, kvm) {
105b21bb 2487 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
0061d53d
MT
2488 kvm_vcpu_kick(vcpu);
2489 }
2490}
2491
7e44e449
AJ
2492static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
2493{
2494 struct kvm *kvm = v->kvm;
2495
105b21bb 2496 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
7e44e449
AJ
2497 schedule_delayed_work(&kvm->arch.kvmclock_update_work,
2498 KVMCLOCK_UPDATE_DELAY);
2499}
2500
332967a3
AJ
2501#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
2502
2503static void kvmclock_sync_fn(struct work_struct *work)
2504{
2505 struct delayed_work *dwork = to_delayed_work(work);
2506 struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
2507 kvmclock_sync_work);
2508 struct kvm *kvm = container_of(ka, struct kvm, arch);
2509
630994b3
MT
2510 if (!kvmclock_periodic_sync)
2511 return;
2512
332967a3
AJ
2513 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
2514 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
2515 KVMCLOCK_SYNC_PERIOD);
2516}
2517
191c8137
BP
2518/*
2519 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
2520 */
2521static bool can_set_mci_status(struct kvm_vcpu *vcpu)
2522{
2523 /* McStatusWrEn enabled? */
2524 if (guest_cpuid_is_amd(vcpu))
2525 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
2526
2527 return false;
2528}
2529
9ffd986c 2530static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
15c4a640 2531{
890ca9ae
HY
2532 u64 mcg_cap = vcpu->arch.mcg_cap;
2533 unsigned bank_num = mcg_cap & 0xff;
9ffd986c
WL
2534 u32 msr = msr_info->index;
2535 u64 data = msr_info->data;
890ca9ae 2536
15c4a640 2537 switch (msr) {
15c4a640 2538 case MSR_IA32_MCG_STATUS:
890ca9ae 2539 vcpu->arch.mcg_status = data;
15c4a640 2540 break;
c7ac679c 2541 case MSR_IA32_MCG_CTL:
44883f01
PB
2542 if (!(mcg_cap & MCG_CTL_P) &&
2543 (data || !msr_info->host_initiated))
890ca9ae
HY
2544 return 1;
2545 if (data != 0 && data != ~(u64)0)
44883f01 2546 return 1;
890ca9ae
HY
2547 vcpu->arch.mcg_ctl = data;
2548 break;
2549 default:
2550 if (msr >= MSR_IA32_MC0_CTL &&
81760dcc 2551 msr < MSR_IA32_MCx_CTL(bank_num)) {
6ec4c5ee
MP
2552 u32 offset = array_index_nospec(
2553 msr - MSR_IA32_MC0_CTL,
2554 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
2555
114be429
AP
2556 /* Only 0 or all 1s can be written to IA32_MCi_CTL.
2557 * Some Linux kernels, though, clear bit 10 in bank 4 to work
2558 * around a BIOS/GART TLB issue on AMD K8s; ignore this to
2559 * avoid an uncaught #GP in the guest.
2560 */
890ca9ae 2561 if ((offset & 0x3) == 0 &&
114be429 2562 data != 0 && (data | (1 << 10)) != ~(u64)0)
890ca9ae 2563 return -1;
191c8137
BP
2564
2565 /* MCi_STATUS */
9ffd986c 2566 if (!msr_info->host_initiated &&
191c8137
BP
2567 (offset & 0x3) == 1 && data != 0) {
2568 if (!can_set_mci_status(vcpu))
2569 return -1;
2570 }
2571
890ca9ae
HY
2572 vcpu->arch.mce_banks[offset] = data;
2573 break;
2574 }
2575 return 1;
2576 }
2577 return 0;
2578}
2579
ffde22ac
ES
2580static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
2581{
2582 struct kvm *kvm = vcpu->kvm;
2583 int lm = is_long_mode(vcpu);
2584 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
2585 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
2586 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
2587 : kvm->arch.xen_hvm_config.blob_size_32;
2588 u32 page_num = data & ~PAGE_MASK;
2589 u64 page_addr = data & PAGE_MASK;
2590 u8 *page;
2591 int r;
2592
2593 r = -E2BIG;
2594 if (page_num >= blob_size)
2595 goto out;
2596 r = -ENOMEM;
ff5c2c03
SL
2597 page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
2598 if (IS_ERR(page)) {
2599 r = PTR_ERR(page);
ffde22ac 2600 goto out;
ff5c2c03 2601 }
54bf36aa 2602 if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
ffde22ac
ES
2603 goto out_free;
2604 r = 0;
2605out_free:
2606 kfree(page);
2607out:
2608 return r;
2609}
2610
344d9588
GN
2611static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
2612{
2613 gpa_t gpa = data & ~0x3f;
2614
52a5c155
WL
2615 /* Bits 3:5 are reserved, Should be zero */
2616 if (data & 0x38)
344d9588
GN
2617 return 1;
2618
2619 vcpu->arch.apf.msr_val = data;
2620
2621 if (!(data & KVM_ASYNC_PF_ENABLED)) {
2622 kvm_clear_async_pf_completion_queue(vcpu);
2623 kvm_async_pf_hash_reset(vcpu);
2624 return 0;
2625 }
2626
4e335d9e 2627 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
8f964525 2628 sizeof(u32)))
344d9588
GN
2629 return 1;
2630
6adba527 2631 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
52a5c155 2632 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
344d9588
GN
2633 kvm_async_pf_wakeup_all(vcpu);
2634 return 0;
2635}
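/*
 * Illustrative sketch, not part of this file: the MSR_KVM_ASYNC_PF_EN
 * layout validated above -- bit 0 enables async PF, bit 1 requests delivery
 * even in kernel mode (its absence sets send_user_only), bit 2 requests
 * delivery as a #PF VM exit, bits 3:5 must be zero, and the remaining bits
 * hold the 64-byte-aligned GPA of the shared word. Hypothetical helper.
 */
static inline gpa_t example_async_pf_gpa(u64 data, bool *enabled)
{
	*enabled = data & KVM_ASYNC_PF_ENABLED;	/* bit 0 */
	return data & ~0x3fULL;
}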
2636
12f9a48f
GC
2637static void kvmclock_reset(struct kvm_vcpu *vcpu)
2638{
0b79459b 2639 vcpu->arch.pv_time_enabled = false;
49dedf0d 2640 vcpu->arch.time = 0;
12f9a48f
GC
2641}
2642
f38a7b75
WL
2643static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
2644{
2645 ++vcpu->stat.tlb_flush;
2646 kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
2647}
2648
c9aaa895
GC
2649static void record_steal_time(struct kvm_vcpu *vcpu)
2650{
2651 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2652 return;
2653
4e335d9e 2654 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
c9aaa895
GC
2655 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
2656 return;
2657
f38a7b75
WL
2658 /*
2659 * Doing a TLB flush here, on the guest's behalf, can avoid
2660 * expensive IPIs.
2661 */
b382f44e
WL
2662 trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
2663 vcpu->arch.st.steal.preempted & KVM_VCPU_FLUSH_TLB);
f38a7b75
WL
2664 if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
2665 kvm_vcpu_flush_tlb(vcpu, false);
0b9f6c46 2666
35f3fae1
WL
2667 if (vcpu->arch.st.steal.version & 1)
2668 vcpu->arch.st.steal.version += 1; /* first time write, random junk */
2669
2670 vcpu->arch.st.steal.version += 1;
2671
4e335d9e 2672 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
35f3fae1
WL
2673 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2674
2675 smp_wmb();
2676
c54cdf14
LC
2677 vcpu->arch.st.steal.steal += current->sched_info.run_delay -
2678 vcpu->arch.st.last_steal;
2679 vcpu->arch.st.last_steal = current->sched_info.run_delay;
35f3fae1 2680
4e335d9e 2681 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
35f3fae1
WL
2682 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2683
2684 smp_wmb();
2685
2686 vcpu->arch.st.steal.version += 1;
c9aaa895 2687
4e335d9e 2688 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
c9aaa895
GC
2689 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2690}
2691
8fe8ab46 2692int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
15c4a640 2693{
5753785f 2694 bool pr = false;
8fe8ab46
WA
2695 u32 msr = msr_info->index;
2696 u64 data = msr_info->data;
5753785f 2697
15c4a640 2698 switch (msr) {
2e32b719 2699 case MSR_AMD64_NB_CFG:
2e32b719
BP
2700 case MSR_IA32_UCODE_WRITE:
2701 case MSR_VM_HSAVE_PA:
2702 case MSR_AMD64_PATCH_LOADER:
2703 case MSR_AMD64_BU_CFG2:
405a353a 2704 case MSR_AMD64_DC_CFG:
0e1b869f 2705 case MSR_F15H_EX_CFG:
2e32b719
BP
2706 break;
2707
518e7b94
WL
2708 case MSR_IA32_UCODE_REV:
2709 if (msr_info->host_initiated)
2710 vcpu->arch.microcode_version = data;
2711 break;
0cf9135b
SC
2712 case MSR_IA32_ARCH_CAPABILITIES:
2713 if (!msr_info->host_initiated)
2714 return 1;
2715 vcpu->arch.arch_capabilities = data;
2716 break;
15c4a640 2717 case MSR_EFER:
11988499 2718 return set_efer(vcpu, msr_info);
8f1589d9
AP
2719 case MSR_K7_HWCR:
2720 data &= ~(u64)0x40; /* ignore flush filter disable */
82494028 2721 data &= ~(u64)0x100; /* ignore ignne emulation enable */
a223c313 2722 data &= ~(u64)0x8; /* ignore TLB cache disable */
191c8137
BP
2723
2724 /* Handle McStatusWrEn */
2725 if (data == BIT_ULL(18)) {
2726 vcpu->arch.msr_hwcr = data;
2727 } else if (data != 0) {
a737f256
CD
2728 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
2729 data);
8f1589d9
AP
2730 return 1;
2731 }
15c4a640 2732 break;
f7c6d140
AP
2733 case MSR_FAM10H_MMIO_CONF_BASE:
2734 if (data != 0) {
a737f256
CD
2735 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
2736 "0x%llx\n", data);
f7c6d140
AP
2737 return 1;
2738 }
15c4a640 2739 break;
b5e2fec0
AG
2740 case MSR_IA32_DEBUGCTLMSR:
2741 if (!data) {
2742 /* We support the non-activated case already */
2743 break;
2744 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
2745 /* Values other than LBR and BTF are vendor-specific,
2746 * thus reserved and should throw a #GP. */
2747 return 1;
2748 }
a737f256
CD
2749 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
2750 __func__, data);
b5e2fec0 2751 break;
9ba075a6 2752 case 0x200 ... 0x2ff:
ff53604b 2753 return kvm_mtrr_set_msr(vcpu, msr, data);
15c4a640 2754 case MSR_IA32_APICBASE:
58cb628d 2755 return kvm_set_apic_base(vcpu, msr_info);
0105d1a5
GN
2756 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2757 return kvm_x2apic_msr_write(vcpu, msr, data);
a3e06bbe
LJ
2758 case MSR_IA32_TSCDEADLINE:
2759 kvm_set_lapic_tscdeadline_msr(vcpu, data);
2760 break;
ba904635 2761 case MSR_IA32_TSC_ADJUST:
d6321d49 2762 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
ba904635 2763 if (!msr_info->host_initiated) {
d913b904 2764 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
d7add054 2765 adjust_tsc_offset_guest(vcpu, adj);
ba904635
WA
2766 }
2767 vcpu->arch.ia32_tsc_adjust_msr = data;
2768 }
2769 break;
15c4a640 2770 case MSR_IA32_MISC_ENABLE:
511a8556
WL
2771 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
2772 ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
2773 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
2774 return 1;
2775 vcpu->arch.ia32_misc_enable_msr = data;
2776 kvm_update_cpuid(vcpu);
2777 } else {
2778 vcpu->arch.ia32_misc_enable_msr = data;
2779 }
15c4a640 2780 break;
64d60670
PB
2781 case MSR_IA32_SMBASE:
2782 if (!msr_info->host_initiated)
2783 return 1;
2784 vcpu->arch.smbase = data;
2785 break;
73f624f4
PB
2786 case MSR_IA32_POWER_CTL:
2787 vcpu->arch.msr_ia32_power_ctl = data;
2788 break;
dd259935
PB
2789 case MSR_IA32_TSC:
2790 kvm_write_tsc(vcpu, msr_info);
2791 break;
864e2ab2
AL
2792 case MSR_IA32_XSS:
2793 if (!msr_info->host_initiated &&
2794 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
2795 return 1;
2796 /*
2797 * We do support PT if kvm_x86_ops->pt_supported(), but we do
2798 * not support IA32_XSS[bit 8]. Guests will have to use
2799 * RDMSR/WRMSR rather than XSAVES/XRSTORS to save/restore PT
2800 * MSRs.
2801 */
2802 if (data != 0)
2803 return 1;
2804 vcpu->arch.ia32_xss = data;
2805 break;
52797bf9
LA
2806 case MSR_SMI_COUNT:
2807 if (!msr_info->host_initiated)
2808 return 1;
2809 vcpu->arch.smi_count = data;
2810 break;
11c6bffa 2811 case MSR_KVM_WALL_CLOCK_NEW:
18068523
GOC
2812 case MSR_KVM_WALL_CLOCK:
2813 vcpu->kvm->arch.wall_clock = data;
2814 kvm_write_wall_clock(vcpu->kvm, data);
2815 break;
11c6bffa 2816 case MSR_KVM_SYSTEM_TIME_NEW:
18068523 2817 case MSR_KVM_SYSTEM_TIME: {
54750f2c
MT
2818 struct kvm_arch *ka = &vcpu->kvm->arch;
2819
54750f2c
MT
2820 if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
2821 bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
2822
2823 if (ka->boot_vcpu_runs_old_kvmclock != tmp)
1bd2009e 2824 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
54750f2c
MT
2825
2826 ka->boot_vcpu_runs_old_kvmclock = tmp;
2827 }
2828
18068523 2829 vcpu->arch.time = data;
0061d53d 2830 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
18068523
GOC
2831
2832 /* we verify if the enable bit is set... */
49dedf0d 2833 vcpu->arch.pv_time_enabled = false;
18068523
GOC
2834 if (!(data & 1))
2835 break;
2836
49dedf0d 2837 if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
8f964525
AH
2838 &vcpu->arch.pv_time, data & ~1ULL,
2839 sizeof(struct pvclock_vcpu_time_info)))
0b79459b 2840 vcpu->arch.pv_time_enabled = true;
32cad84f 2841
18068523
GOC
2842 break;
2843 }
344d9588
GN
2844 case MSR_KVM_ASYNC_PF_EN:
2845 if (kvm_pv_enable_async_pf(vcpu, data))
2846 return 1;
2847 break;
c9aaa895
GC
2848 case MSR_KVM_STEAL_TIME:
2849
2850 if (unlikely(!sched_info_on()))
2851 return 1;
2852
2853 if (data & KVM_STEAL_RESERVED_MASK)
2854 return 1;
2855
4e335d9e 2856 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
8f964525
AH
2857 data & KVM_STEAL_VALID_BITS,
2858 sizeof(struct kvm_steal_time)))
c9aaa895
GC
2859 return 1;
2860
2861 vcpu->arch.st.msr_val = data;
2862
2863 if (!(data & KVM_MSR_ENABLED))
2864 break;
2865
c9aaa895
GC
2866 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2867
2868 break;
ae7a2a3f 2869 case MSR_KVM_PV_EOI_EN:
72bbf935 2870 if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
ae7a2a3f
MT
2871 return 1;
2872 break;
c9aaa895 2873
2d5ba19b
MT
2874 case MSR_KVM_POLL_CONTROL:
2875 /* only enable bit supported */
2876 if (data & (-1ULL << 1))
2877 return 1;
2878
2879 vcpu->arch.msr_kvm_poll_control = data;
2880 break;
2881
890ca9ae
HY
2882 case MSR_IA32_MCG_CTL:
2883 case MSR_IA32_MCG_STATUS:
81760dcc 2884 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
9ffd986c 2885 return set_msr_mce(vcpu, msr_info);
71db6023 2886
6912ac32
WH
2887 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2888 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2889 pr = true; /* fall through */
2890 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2891 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
c6702c9d 2892 if (kvm_pmu_is_valid_msr(vcpu, msr))
afd80d85 2893 return kvm_pmu_set_msr(vcpu, msr_info);
5753785f
GN
2894
2895 if (pr || data != 0)
a737f256
CD
2896 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
2897 "0x%x data 0x%llx\n", msr, data);
5753785f 2898 break;
84e0cefa
JS
2899 case MSR_K7_CLK_CTL:
2900 /*
2901 * Ignore all writes to this no longer documented MSR.
2902 * Writes are only relevant for old K7 processors,
2903 * all pre-dating SVM, but a recommended workaround from
4a969980 2904 * AMD for these chips. It is possible to specify the
84e0cefa
JS
2905 * affected processor models on the command line, hence
2906 * the need to ignore the workaround.
2907 */
2908 break;
55cd8e5a 2909 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
e7d9513b
AS
2910 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2911 case HV_X64_MSR_CRASH_CTL:
1f4b34f8 2912 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
a2e164e7
VK
2913 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
2914 case HV_X64_MSR_TSC_EMULATION_CONTROL:
2915 case HV_X64_MSR_TSC_EMULATION_STATUS:
e7d9513b
AS
2916 return kvm_hv_set_msr_common(vcpu, msr, data,
2917 msr_info->host_initiated);
91c9c3ed 2918 case MSR_IA32_BBL_CR_CTL3:
2919 /* Drop writes to this legacy MSR -- see rdmsr
2920 * counterpart for further detail.
2921 */
fab0aa3b
EM
2922 if (report_ignored_msrs)
2923 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
2924 msr, data);
91c9c3ed 2925 break;
2b036c6b 2926 case MSR_AMD64_OSVW_ID_LENGTH:
d6321d49 2927 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2b036c6b
BO
2928 return 1;
2929 vcpu->arch.osvw.length = data;
2930 break;
2931 case MSR_AMD64_OSVW_STATUS:
d6321d49 2932 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2b036c6b
BO
2933 return 1;
2934 vcpu->arch.osvw.status = data;
2935 break;
db2336a8
KH
2936 case MSR_PLATFORM_INFO:
2937 if (!msr_info->host_initiated ||
db2336a8
KH
2938 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
2939 cpuid_fault_enabled(vcpu)))
2940 return 1;
2941 vcpu->arch.msr_platform_info = data;
2942 break;
2943 case MSR_MISC_FEATURES_ENABLES:
2944 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
2945 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
2946 !supports_cpuid_fault(vcpu)))
2947 return 1;
2948 vcpu->arch.msr_misc_features_enables = data;
2949 break;
15c4a640 2950 default:
ffde22ac
ES
2951 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
2952 return xen_hvm_config(vcpu, data);
c6702c9d 2953 if (kvm_pmu_is_valid_msr(vcpu, msr))
afd80d85 2954 return kvm_pmu_set_msr(vcpu, msr_info);
ed85c068 2955 if (!ignore_msrs) {
ae0f5499 2956 vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
a737f256 2957 msr, data);
ed85c068
AP
2958 return 1;
2959 } else {
fab0aa3b
EM
2960 if (report_ignored_msrs)
2961 vcpu_unimpl(vcpu,
2962 "ignored wrmsr: 0x%x data 0x%llx\n",
2963 msr, data);
ed85c068
AP
2964 break;
2965 }
15c4a640
CO
2966 }
2967 return 0;
2968}
2969EXPORT_SYMBOL_GPL(kvm_set_msr_common);
2970
44883f01 2971static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
15c4a640
CO
2972{
2973 u64 data;
890ca9ae
HY
2974 u64 mcg_cap = vcpu->arch.mcg_cap;
2975 unsigned bank_num = mcg_cap & 0xff;
15c4a640
CO
2976
2977 switch (msr) {
15c4a640
CO
2978 case MSR_IA32_P5_MC_ADDR:
2979 case MSR_IA32_P5_MC_TYPE:
890ca9ae
HY
2980 data = 0;
2981 break;
15c4a640 2982 case MSR_IA32_MCG_CAP:
890ca9ae
HY
2983 data = vcpu->arch.mcg_cap;
2984 break;
c7ac679c 2985 case MSR_IA32_MCG_CTL:
44883f01 2986 if (!(mcg_cap & MCG_CTL_P) && !host)
890ca9ae
HY
2987 return 1;
2988 data = vcpu->arch.mcg_ctl;
2989 break;
2990 case MSR_IA32_MCG_STATUS:
2991 data = vcpu->arch.mcg_status;
2992 break;
2993 default:
2994 if (msr >= MSR_IA32_MC0_CTL &&
81760dcc 2995 msr < MSR_IA32_MCx_CTL(bank_num)) {
6ec4c5ee
MP
2996 u32 offset = array_index_nospec(
2997 msr - MSR_IA32_MC0_CTL,
2998 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
2999
890ca9ae
HY
3000 data = vcpu->arch.mce_banks[offset];
3001 break;
3002 }
3003 return 1;
3004 }
3005 *pdata = data;
3006 return 0;
3007}
3008
609e36d3 3009int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
890ca9ae 3010{
609e36d3 3011 switch (msr_info->index) {
890ca9ae 3012 case MSR_IA32_PLATFORM_ID:
15c4a640 3013 case MSR_IA32_EBL_CR_POWERON:
b5e2fec0
AG
3014 case MSR_IA32_DEBUGCTLMSR:
3015 case MSR_IA32_LASTBRANCHFROMIP:
3016 case MSR_IA32_LASTBRANCHTOIP:
3017 case MSR_IA32_LASTINTFROMIP:
3018 case MSR_IA32_LASTINTTOIP:
60af2ecd 3019 case MSR_K8_SYSCFG:
3afb1121
PB
3020 case MSR_K8_TSEG_ADDR:
3021 case MSR_K8_TSEG_MASK:
61a6bd67 3022 case MSR_VM_HSAVE_PA:
1fdbd48c 3023 case MSR_K8_INT_PENDING_MSG:
c323c0e5 3024 case MSR_AMD64_NB_CFG:
f7c6d140 3025 case MSR_FAM10H_MMIO_CONF_BASE:
2e32b719 3026 case MSR_AMD64_BU_CFG2:
0c2df2a1 3027 case MSR_IA32_PERF_CTL:
405a353a 3028 case MSR_AMD64_DC_CFG:
0e1b869f 3029 case MSR_F15H_EX_CFG:
609e36d3 3030 msr_info->data = 0;
15c4a640 3031 break;
c51eb52b 3032 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
6912ac32
WH
3033 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
3034 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
3035 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
3036 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
c6702c9d 3037 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
609e36d3
PB
3038 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
3039 msr_info->data = 0;
5753785f 3040 break;
742bc670 3041 case MSR_IA32_UCODE_REV:
518e7b94 3042 msr_info->data = vcpu->arch.microcode_version;
742bc670 3043 break;
0cf9135b
SC
3044 case MSR_IA32_ARCH_CAPABILITIES:
3045 if (!msr_info->host_initiated &&
3046 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3047 return 1;
3048 msr_info->data = vcpu->arch.arch_capabilities;
3049 break;
73f624f4
PB
3050 case MSR_IA32_POWER_CTL:
3051 msr_info->data = vcpu->arch.msr_ia32_power_ctl;
3052 break;
dd259935
PB
3053 case MSR_IA32_TSC:
3054 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
3055 break;
9ba075a6 3056 case MSR_MTRRcap:
9ba075a6 3057 case 0x200 ... 0x2ff:
ff53604b 3058 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
15c4a640 3059 case 0xcd: /* fsb frequency */
609e36d3 3060 msr_info->data = 3;
15c4a640 3061 break;
7b914098
JS
3062 /*
3063 * MSR_EBC_FREQUENCY_ID
3064 * Conservative value valid for even the basic CPU models.
3065 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
3066 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
3067 * and 266MHz for model 3, or 4. Set Core Clock
3068 * Frequency to System Bus Frequency Ratio to 1 (bits
3069 * 31:24) even though these are only valid for CPU
3070 * models > 2, however guests may end up dividing or
3071 * multiplying by zero otherwise.
3072 */
3073 case MSR_EBC_FREQUENCY_ID:
609e36d3 3074 msr_info->data = 1 << 24;
7b914098 3075 break;
15c4a640 3076 case MSR_IA32_APICBASE:
609e36d3 3077 msr_info->data = kvm_get_apic_base(vcpu);
15c4a640 3078 break;
0105d1a5 3079 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
609e36d3 3080 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
0105d1a5 3081 break;
a3e06bbe 3082 case MSR_IA32_TSCDEADLINE:
609e36d3 3083 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
a3e06bbe 3084 break;
ba904635 3085 case MSR_IA32_TSC_ADJUST:
609e36d3 3086 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
ba904635 3087 break;
15c4a640 3088 case MSR_IA32_MISC_ENABLE:
609e36d3 3089 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
15c4a640 3090 break;
64d60670
PB
3091 case MSR_IA32_SMBASE:
3092 if (!msr_info->host_initiated)
3093 return 1;
3094 msr_info->data = vcpu->arch.smbase;
15c4a640 3095 break;
52797bf9
LA
3096 case MSR_SMI_COUNT:
3097 msr_info->data = vcpu->arch.smi_count;
3098 break;
847f0ad8
AG
3099 case MSR_IA32_PERF_STATUS:
3100 /* TSC increment by tick */
609e36d3 3101 msr_info->data = 1000ULL;
847f0ad8 3102 /* CPU multiplier */
b0996ae4 3103 msr_info->data |= (((uint64_t)4ULL) << 40);
847f0ad8 3104 break;
15c4a640 3105 case MSR_EFER:
609e36d3 3106 msr_info->data = vcpu->arch.efer;
15c4a640 3107 break;
18068523 3108 case MSR_KVM_WALL_CLOCK:
11c6bffa 3109 case MSR_KVM_WALL_CLOCK_NEW:
609e36d3 3110 msr_info->data = vcpu->kvm->arch.wall_clock;
18068523
GOC
3111 break;
3112 case MSR_KVM_SYSTEM_TIME:
11c6bffa 3113 case MSR_KVM_SYSTEM_TIME_NEW:
609e36d3 3114 msr_info->data = vcpu->arch.time;
18068523 3115 break;
344d9588 3116 case MSR_KVM_ASYNC_PF_EN:
609e36d3 3117 msr_info->data = vcpu->arch.apf.msr_val;
344d9588 3118 break;
c9aaa895 3119 case MSR_KVM_STEAL_TIME:
609e36d3 3120 msr_info->data = vcpu->arch.st.msr_val;
c9aaa895 3121 break;
1d92128f 3122 case MSR_KVM_PV_EOI_EN:
609e36d3 3123 msr_info->data = vcpu->arch.pv_eoi.msr_val;
1d92128f 3124 break;
2d5ba19b
MT
3125 case MSR_KVM_POLL_CONTROL:
3126 msr_info->data = vcpu->arch.msr_kvm_poll_control;
3127 break;
890ca9ae
HY
3128 case MSR_IA32_P5_MC_ADDR:
3129 case MSR_IA32_P5_MC_TYPE:
3130 case MSR_IA32_MCG_CAP:
3131 case MSR_IA32_MCG_CTL:
3132 case MSR_IA32_MCG_STATUS:
81760dcc 3133 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
44883f01
PB
3134 return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
3135 msr_info->host_initiated);
864e2ab2
AL
3136 case MSR_IA32_XSS:
3137 if (!msr_info->host_initiated &&
3138 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
3139 return 1;
3140 msr_info->data = vcpu->arch.ia32_xss;
3141 break;
84e0cefa
JS
3142 case MSR_K7_CLK_CTL:
3143 /*
3144 * Provide expected ramp-up count for K7. All other
3145 * are set to zero, indicating minimum divisors for
3146 * every field.
3147 *
3148 * This prevents guest kernels on AMD host with CPU
3149 * type 6, model 8 and higher from exploding due to
3150 * the rdmsr failing.
3151 */
609e36d3 3152 msr_info->data = 0x20000000;
84e0cefa 3153 break;
55cd8e5a 3154 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
e7d9513b
AS
3155 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3156 case HV_X64_MSR_CRASH_CTL:
1f4b34f8 3157 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
a2e164e7
VK
3158 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3159 case HV_X64_MSR_TSC_EMULATION_CONTROL:
3160 case HV_X64_MSR_TSC_EMULATION_STATUS:
e83d5887 3161 return kvm_hv_get_msr_common(vcpu,
44883f01
PB
3162 msr_info->index, &msr_info->data,
3163 msr_info->host_initiated);
55cd8e5a 3164 break;
91c9c3ed 3165 case MSR_IA32_BBL_CR_CTL3:
3166 /* This legacy MSR exists but isn't fully documented in current
3167 * silicon. It is however accessed by winxp in very narrow
3168 * scenarios where it sets bit #19, itself documented as
3169 * a "reserved" bit. Best effort attempt to source coherent
3170 * read data here should the balance of the register be
3171 * interpreted by the guest:
3172 *
3173 * L2 cache control register 3: 64GB range, 256KB size,
3174 * enabled, latency 0x1, configured
3175 */
609e36d3 3176 msr_info->data = 0xbe702111;
91c9c3ed 3177 break;
2b036c6b 3178 case MSR_AMD64_OSVW_ID_LENGTH:
d6321d49 3179 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2b036c6b 3180 return 1;
609e36d3 3181 msr_info->data = vcpu->arch.osvw.length;
2b036c6b
BO
3182 break;
3183 case MSR_AMD64_OSVW_STATUS:
d6321d49 3184 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2b036c6b 3185 return 1;
609e36d3 3186 msr_info->data = vcpu->arch.osvw.status;
2b036c6b 3187 break;
db2336a8 3188 case MSR_PLATFORM_INFO:
6fbbde9a
DS
3189 if (!msr_info->host_initiated &&
3190 !vcpu->kvm->arch.guest_can_read_msr_platform_info)
3191 return 1;
db2336a8
KH
3192 msr_info->data = vcpu->arch.msr_platform_info;
3193 break;
3194 case MSR_MISC_FEATURES_ENABLES:
3195 msr_info->data = vcpu->arch.msr_misc_features_enables;
3196 break;
191c8137
BP
3197 case MSR_K7_HWCR:
3198 msr_info->data = vcpu->arch.msr_hwcr;
3199 break;
15c4a640 3200 default:
c6702c9d 3201 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
609e36d3 3202 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
ed85c068 3203 if (!ignore_msrs) {
ae0f5499
BD
3204 vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
3205 msr_info->index);
ed85c068
AP
3206 return 1;
3207 } else {
fab0aa3b
EM
3208 if (report_ignored_msrs)
3209 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
3210 msr_info->index);
609e36d3 3211 msr_info->data = 0;
ed85c068
AP
3212 }
3213 break;
15c4a640 3214 }
15c4a640
CO
3215 return 0;
3216}
3217EXPORT_SYMBOL_GPL(kvm_get_msr_common);
3218
313a3dc7
CO
3219/*
3220 * Read or write a bunch of msrs. All parameters are kernel addresses.
3221 *
3222 * @return number of msrs set successfully.
3223 */
3224static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
3225 struct kvm_msr_entry *entries,
3226 int (*do_msr)(struct kvm_vcpu *vcpu,
3227 unsigned index, u64 *data))
3228{
801e459a 3229 int i;
313a3dc7 3230
313a3dc7
CO
3231 for (i = 0; i < msrs->nmsrs; ++i)
3232 if (do_msr(vcpu, entries[i].index, &entries[i].data))
3233 break;
3234
313a3dc7
CO
3235 return i;
3236}
3237
3238/*
3239 * Read or write a bunch of msrs. Parameters are user addresses.
3240 *
3241 * @return number of msrs set successfully.
3242 */
3243static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
3244 int (*do_msr)(struct kvm_vcpu *vcpu,
3245 unsigned index, u64 *data),
3246 int writeback)
3247{
3248 struct kvm_msrs msrs;
3249 struct kvm_msr_entry *entries;
3250 int r, n;
3251 unsigned size;
3252
3253 r = -EFAULT;
0e96f31e 3254 if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
3255 goto out;
3256
3257 r = -E2BIG;
3258 if (msrs.nmsrs >= MAX_IO_MSRS)
3259 goto out;
3260
313a3dc7 3261 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
3262 entries = memdup_user(user_msrs->entries, size);
3263 if (IS_ERR(entries)) {
3264 r = PTR_ERR(entries);
313a3dc7 3265 goto out;
ff5c2c03 3266 }
3267
3268 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
3269 if (r < 0)
3270 goto out_free;
3271
3272 r = -EFAULT;
3273 if (writeback && copy_to_user(user_msrs->entries, entries, size))
3274 goto out_free;
3275
3276 r = n;
3277
3278out_free:
7a73c028 3279 kfree(entries);
3280out:
3281 return r;
3282}
3283
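Userspace drives the path above through the KVM_GET_MSRS/KVM_SET_MSRS vcpu ioctls. A minimal sketch of the read side, assuming a vcpu fd obtained from KVM_CREATE_VCPU (error handling trimmed):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int read_one_msr(int vcpu_fd, __u32 index, __u64 *val)
{
	struct kvm_msrs *msrs;
	int r = -1;

	msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
	if (!msrs)
		return -1;
	msrs->nmsrs = 1;
	msrs->entries[0].index = index;
	/* the ioctl returns the number of MSRs processed, 1 on full success */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, msrs) == 1) {
		*val = msrs->entries[0].data;
		r = 0;
	}
	free(msrs);
	return r;
}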
3284static inline bool kvm_can_mwait_in_guest(void)
3285{
3286 return boot_cpu_has(X86_FEATURE_MWAIT) &&
3287 !boot_cpu_has_bug(X86_BUG_MONITOR) &&
3288 boot_cpu_has(X86_FEATURE_ARAT);
3289}
3290
784aa3d7 3291int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
018d00d2 3292{
4d5422ce 3293 int r = 0;
3294
3295 switch (ext) {
3296 case KVM_CAP_IRQCHIP:
3297 case KVM_CAP_HLT:
3298 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
018d00d2 3299 case KVM_CAP_SET_TSS_ADDR:
07716717 3300 case KVM_CAP_EXT_CPUID:
9c15bb1d 3301 case KVM_CAP_EXT_EMUL_CPUID:
c8076604 3302 case KVM_CAP_CLOCKSOURCE:
7837699f 3303 case KVM_CAP_PIT:
a28e4f5a 3304 case KVM_CAP_NOP_IO_DELAY:
62d9f0db 3305 case KVM_CAP_MP_STATE:
ed848624 3306 case KVM_CAP_SYNC_MMU:
a355c85c 3307 case KVM_CAP_USER_NMI:
52d939a0 3308 case KVM_CAP_REINJECT_CONTROL:
4925663a 3309 case KVM_CAP_IRQ_INJECT_STATUS:
d34e6b17 3310 case KVM_CAP_IOEVENTFD:
f848a5a8 3311 case KVM_CAP_IOEVENTFD_NO_LENGTH:
c5ff41ce 3312 case KVM_CAP_PIT2:
e9f42757 3313 case KVM_CAP_PIT_STATE2:
b927a3ce 3314 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
ffde22ac 3315 case KVM_CAP_XEN_HVM:
3cfc3092 3316 case KVM_CAP_VCPU_EVENTS:
55cd8e5a 3317 case KVM_CAP_HYPERV:
10388a07 3318 case KVM_CAP_HYPERV_VAPIC:
c25bc163 3319 case KVM_CAP_HYPERV_SPIN:
5c919412 3320 case KVM_CAP_HYPERV_SYNIC:
efc479e6 3321 case KVM_CAP_HYPERV_SYNIC2:
d3457c87 3322 case KVM_CAP_HYPERV_VP_INDEX:
faeb7833 3323 case KVM_CAP_HYPERV_EVENTFD:
c1aea919 3324 case KVM_CAP_HYPERV_TLBFLUSH:
214ff83d 3325 case KVM_CAP_HYPERV_SEND_IPI:
2bc39970 3326 case KVM_CAP_HYPERV_CPUID:
ab9f4ecb 3327 case KVM_CAP_PCI_SEGMENT:
a1efbe77 3328 case KVM_CAP_DEBUGREGS:
d2be1651 3329 case KVM_CAP_X86_ROBUST_SINGLESTEP:
2d5b5a66 3330 case KVM_CAP_XSAVE:
344d9588 3331 case KVM_CAP_ASYNC_PF:
92a1f12d 3332 case KVM_CAP_GET_TSC_KHZ:
1c0b28c2 3333 case KVM_CAP_KVMCLOCK_CTRL:
4d8b81ab 3334 case KVM_CAP_READONLY_MEM:
5f66b620 3335 case KVM_CAP_HYPERV_TIME:
100943c5 3336 case KVM_CAP_IOAPIC_POLARITY_IGNORED:
defcf51f 3337 case KVM_CAP_TSC_DEADLINE_TIMER:
90de4a18 3338 case KVM_CAP_DISABLE_QUIRKS:
d71ba788 3339 case KVM_CAP_SET_BOOT_CPU_ID:
49df6397 3340 case KVM_CAP_SPLIT_IRQCHIP:
460df4c1 3341 case KVM_CAP_IMMEDIATE_EXIT:
66bb8a06 3342 case KVM_CAP_PMU_EVENT_FILTER:
801e459a 3343 case KVM_CAP_GET_MSR_FEATURES:
6fbbde9a 3344 case KVM_CAP_MSR_PLATFORM_INFO:
c4f55198 3345 case KVM_CAP_EXCEPTION_PAYLOAD:
3346 r = 1;
3347 break;
3348 case KVM_CAP_SYNC_REGS:
3349 r = KVM_SYNC_X86_VALID_FIELDS;
3350 break;
3351 case KVM_CAP_ADJUST_CLOCK:
3352 r = KVM_CLOCK_TSC_STABLE;
3353 break;
4d5422ce 3354 case KVM_CAP_X86_DISABLE_EXITS:
3355 r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
3356 KVM_X86_DISABLE_EXITS_CSTATE;
3357 if (kvm_can_mwait_in_guest())
3358 r |= KVM_X86_DISABLE_EXITS_MWAIT;
668fffa3 3359 break;
3360 case KVM_CAP_X86_SMM:
3361 /* SMBASE is usually relocated above 1M on modern chipsets,
3362 * and SMM handlers might indeed rely on 4G segment limits,
3363 * so do not report SMM to be available if real mode is
3364 * emulated via vm86 mode. Still, do not go to great lengths
3365 * to avoid userspace's usage of the feature, because it is a
3366 * fringe case that is not enabled except via specific settings
3367 * of the module parameters.
3368 */
bc226f07 3369 r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
6d396b55 3370 break;
3371 case KVM_CAP_VAPIC:
3372 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
3373 break;
f725230a 3374 case KVM_CAP_NR_VCPUS:
3375 r = KVM_SOFT_MAX_VCPUS;
3376 break;
3377 case KVM_CAP_MAX_VCPUS:
3378 r = KVM_MAX_VCPUS;
3379 break;
3380 case KVM_CAP_MAX_VCPU_ID:
3381 r = KVM_MAX_VCPU_ID;
3382 break;
3383 case KVM_CAP_PV_MMU: /* obsolete */
3384 r = 0;
2f333bcb 3385 break;
3386 case KVM_CAP_MCE:
3387 r = KVM_MAX_MCE_BANKS;
3388 break;
2d5b5a66 3389 case KVM_CAP_XCRS:
d366bf7e 3390 r = boot_cpu_has(X86_FEATURE_XSAVE);
2d5b5a66 3391 break;
3392 case KVM_CAP_TSC_CONTROL:
3393 r = kvm_has_tsc_control;
3394 break;
3395 case KVM_CAP_X2APIC_API:
3396 r = KVM_X2APIC_API_VALID_FLAGS;
3397 break;
3398 case KVM_CAP_NESTED_STATE:
3399 r = kvm_x86_ops->get_nested_state ?
be43c440 3400 kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
8fcc4b59 3401 break;
344c6c80 3402 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
3403 r = kvm_x86_ops->enable_direct_tlbflush != NULL;
3404 break;
3405 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
3406 r = kvm_x86_ops->nested_enable_evmcs != NULL;
344c6c80 3407 break;
018d00d2 3408 default:
3409 break;
3410 }
3411 return r;
3412
3413}
3414
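The switch above is what userspace sees through the KVM_CHECK_EXTENSION ioctl. A minimal sketch of the caller side; note that several capabilities return a value rather than a boolean (e.g. KVM_CAP_NR_VCPUS yields the recommended vcpu count, KVM_CAP_MCE the number of MCE banks):

#include <sys/ioctl.h>
#include <linux/kvm.h>

int vm_check_extension(int vm_fd, long cap)
{
	/* 0 means unsupported; positive values are capability-specific */
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, cap);
}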
3415long kvm_arch_dev_ioctl(struct file *filp,
3416 unsigned int ioctl, unsigned long arg)
3417{
3418 void __user *argp = (void __user *)arg;
3419 long r;
3420
3421 switch (ioctl) {
3422 case KVM_GET_MSR_INDEX_LIST: {
3423 struct kvm_msr_list __user *user_msr_list = argp;
3424 struct kvm_msr_list msr_list;
3425 unsigned n;
3426
3427 r = -EFAULT;
0e96f31e 3428 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
3429 goto out;
3430 n = msr_list.nmsrs;
62ef68bb 3431 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
0e96f31e 3432 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
3433 goto out;
3434 r = -E2BIG;
e125e7b6 3435 if (n < msr_list.nmsrs)
3436 goto out;
3437 r = -EFAULT;
3438 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
3439 num_msrs_to_save * sizeof(u32)))
3440 goto out;
e125e7b6 3441 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
043405e1 3442 &emulated_msrs,
62ef68bb 3443 num_emulated_msrs * sizeof(u32)))
3444 goto out;
3445 r = 0;
3446 break;
3447 }
3448 case KVM_GET_SUPPORTED_CPUID:
3449 case KVM_GET_EMULATED_CPUID: {
3450 struct kvm_cpuid2 __user *cpuid_arg = argp;
3451 struct kvm_cpuid2 cpuid;
3452
3453 r = -EFAULT;
0e96f31e 3454 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
674eea0f 3455 goto out;
3456
3457 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
3458 ioctl);
3459 if (r)
3460 goto out;
3461
3462 r = -EFAULT;
0e96f31e 3463 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
3464 goto out;
3465 r = 0;
3466 break;
3467 }
890ca9ae 3468 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
890ca9ae 3469 r = -EFAULT;
3470 if (copy_to_user(argp, &kvm_mce_cap_supported,
3471 sizeof(kvm_mce_cap_supported)))
3472 goto out;
3473 r = 0;
3474 break;
3475 case KVM_GET_MSR_FEATURE_INDEX_LIST: {
3476 struct kvm_msr_list __user *user_msr_list = argp;
3477 struct kvm_msr_list msr_list;
3478 unsigned int n;
3479
3480 r = -EFAULT;
3481 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
3482 goto out;
3483 n = msr_list.nmsrs;
3484 msr_list.nmsrs = num_msr_based_features;
3485 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
3486 goto out;
3487 r = -E2BIG;
3488 if (n < msr_list.nmsrs)
3489 goto out;
3490 r = -EFAULT;
3491 if (copy_to_user(user_msr_list->indices, &msr_based_features,
3492 num_msr_based_features * sizeof(u32)))
3493 goto out;
3494 r = 0;
3495 break;
3496 }
3497 case KVM_GET_MSRS:
3498 r = msr_io(NULL, argp, do_get_msr_feature, 1);
3499 break;
890ca9ae 3500 }
3501 default:
3502 r = -EINVAL;
3503 }
3504out:
3505 return r;
3506}
3507
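The KVM_GET_MSR_INDEX_LIST handler above implements the usual two-call protocol: a first call with a too-small nmsrs fails with E2BIG but writes back the required count. A minimal userspace sketch, assuming a /dev/kvm fd:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

struct kvm_msr_list *fetch_msr_index_list(int kvm_fd)
{
	struct kvm_msr_list probe = { .nmsrs = 0 };
	struct kvm_msr_list *list;

	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);	/* fails, fills nmsrs */
	list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
	if (!list)
		return NULL;
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;
}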
3508static void wbinvd_ipi(void *garbage)
3509{
3510 wbinvd();
3511}
3512
3513static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
3514{
e0f0bbc5 3515 return kvm_arch_has_noncoherent_dma(vcpu->kvm);
3516}
3517
3518void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3519{
3520 /* Address WBINVD may be executed by guest */
3521 if (need_emulate_wbinvd(vcpu)) {
3522 if (kvm_x86_ops->has_wbinvd_exit())
3523 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
3524 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
3525 smp_call_function_single(vcpu->cpu,
3526 wbinvd_ipi, NULL, 1);
3527 }
3528
313a3dc7 3529 kvm_x86_ops->vcpu_load(vcpu, cpu);
8f6055cb 3530
3531 /* Apply any externally detected TSC adjustments (due to suspend) */
3532 if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
3533 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
3534 vcpu->arch.tsc_offset_adjustment = 0;
105b21bb 3535 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
0dd6a6ed 3536 }
8f6055cb 3537
b0c39dc6 3538 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
6f526ec5 3539 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
4ea1636b 3540 rdtsc() - vcpu->arch.last_host_tsc;
3541 if (tsc_delta < 0)
3542 mark_tsc_unstable("KVM discovered backwards TSC");
ce7a058a 3543
b0c39dc6 3544 if (kvm_check_tsc_unstable()) {
07c1419a 3545 u64 offset = kvm_compute_tsc_offset(vcpu,
b183aa58 3546 vcpu->arch.last_guest_tsc);
a545ab6a 3547 kvm_vcpu_write_tsc_offset(vcpu, offset);
c285545f 3548 vcpu->arch.tsc_catchup = 1;
c285545f 3549 }
3550
3551 if (kvm_lapic_hv_timer_in_use(vcpu))
3552 kvm_lapic_restart_hv_timer(vcpu);
3553
3554 /*
3555 * On a host with synchronized TSC, there is no need to update
3556 * kvmclock on vcpu->cpu migration
3557 */
3558 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
0061d53d 3559 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
c285545f 3560 if (vcpu->cpu != cpu)
1bd2009e 3561 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
e48672fa 3562 vcpu->cpu = cpu;
6b7d7e76 3563 }
c9aaa895 3564
c9aaa895 3565 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
3566}
3567
3568static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
3569{
3570 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
3571 return;
3572
fa55eedd 3573 vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
0b9f6c46 3574
4e335d9e 3575 kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
3576 &vcpu->arch.st.steal.preempted,
3577 offsetof(struct kvm_steal_time, preempted),
3578 sizeof(vcpu->arch.st.steal.preempted));
3579}
3580
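The byte written here is consumed by the guest's paravirt scheduler hooks. Roughly (a sketch, from memory, of the guest-side reader in arch/x86/kernel/kvm.c; steal_time is the guest's per-cpu copy of this structure and is not defined in this file):

static bool sketch_vcpu_is_preempted(int cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}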
3581void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3582{
cc0d907c 3583 int idx;
3584
3585 if (vcpu->preempted)
3586 vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
3587
3588 /*
3589 * Disable page faults because we're in atomic context here.
3590 * kvm_write_guest_offset_cached() would call might_fault()
3591 * that relies on pagefault_disable() to tell if there's a
3592 * bug. NOTE: the write to guest memory may not go through if
3593 * during postcopy live migration or if there's heavy guest
3594 * paging.
3595 */
3596 pagefault_disable();
3597 /*
3598 * kvm_memslots() will be called by
3599 * kvm_write_guest_offset_cached() so take the srcu lock.
3600 */
3601 idx = srcu_read_lock(&vcpu->kvm->srcu);
0b9f6c46 3602 kvm_steal_time_set_preempted(vcpu);
cc0d907c 3603 srcu_read_unlock(&vcpu->kvm->srcu, idx);
931f261b 3604 pagefault_enable();
02daab21 3605 kvm_x86_ops->vcpu_put(vcpu);
4ea1636b 3606 vcpu->arch.last_host_tsc = rdtsc();
efdab992 3607 /*
3608 * If userspace has set any breakpoints or watchpoints, dr6 is restored
3609 * on every vmexit, but if not, we might have a stale dr6 from the
3610 * guest. do_debug expects dr6 to be cleared after it runs, do the same.
efdab992 3611 */
f9dcf08e 3612 set_debugreg(0, 6);
3613}
3614
3615static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
3616 struct kvm_lapic_state *s)
3617{
fa59cc00 3618 if (vcpu->arch.apicv_active)
3619 kvm_x86_ops->sync_pir_to_irr(vcpu);
3620
a92e2543 3621 return kvm_apic_get_state(vcpu, s);
3622}
3623
3624static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
3625 struct kvm_lapic_state *s)
3626{
3627 int r;
3628
3629 r = kvm_apic_set_state(vcpu, s);
3630 if (r)
3631 return r;
cb142eb7 3632 update_cr8_intercept(vcpu);
3633
3634 return 0;
3635}
3636
3637static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
3638{
3639 return (!lapic_in_kernel(vcpu) ||
3640 kvm_apic_accept_pic_intr(vcpu));
3641}
3642
3643/*
3644 * if userspace requested an interrupt window, check that the
3645 * interrupt window is open.
3646 *
3647 * No need to exit to userspace if we already have an interrupt queued.
3648 */
3649static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
3650{
3651 return kvm_arch_interrupt_allowed(vcpu) &&
3652 !kvm_cpu_has_interrupt(vcpu) &&
3653 !kvm_event_needs_reinjection(vcpu) &&
3654 kvm_cpu_accept_dm_intr(vcpu);
3655}
3656
3657static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
3658 struct kvm_interrupt *irq)
3659{
02cdb50f 3660 if (irq->irq >= KVM_NR_INTERRUPTS)
f77bc6a4 3661 return -EINVAL;
3662
3663 if (!irqchip_in_kernel(vcpu->kvm)) {
3664 kvm_queue_interrupt(vcpu, irq->irq, false);
3665 kvm_make_request(KVM_REQ_EVENT, vcpu);
3666 return 0;
3667 }
3668
3669 /*
3670 * With in-kernel LAPIC, we only use this to inject EXTINT, so
3671 * fail for in-kernel 8259.
3672 */
3673 if (pic_in_kernel(vcpu->kvm))
f77bc6a4 3674 return -ENXIO;
f77bc6a4 3675
3676 if (vcpu->arch.pending_external_vector != -1)
3677 return -EEXIST;
f77bc6a4 3678
1c1a9ce9 3679 vcpu->arch.pending_external_vector = irq->irq;
934bf653 3680 kvm_make_request(KVM_REQ_EVENT, vcpu);
3681 return 0;
3682}
3683
3684static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
3685{
c4abb7c9 3686 kvm_inject_nmi(vcpu);
3687
3688 return 0;
3689}
3690
3691static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
3692{
3693 kvm_make_request(KVM_REQ_SMI, vcpu);
3694
3695 return 0;
3696}
3697
3698static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
3699 struct kvm_tpr_access_ctl *tac)
3700{
3701 if (tac->flags)
3702 return -EINVAL;
3703 vcpu->arch.tpr_access_reporting = !!tac->enabled;
3704 return 0;
3705}
3706
3707static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
3708 u64 mcg_cap)
3709{
3710 int r;
3711 unsigned bank_num = mcg_cap & 0xff, bank;
3712
3713 r = -EINVAL;
a9e38c3e 3714 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
890ca9ae 3715 goto out;
c45dcc71 3716 if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
3717 goto out;
3718 r = 0;
3719 vcpu->arch.mcg_cap = mcg_cap;
3720 /* Init IA32_MCG_CTL to all 1s */
3721 if (mcg_cap & MCG_CTL_P)
3722 vcpu->arch.mcg_ctl = ~(u64)0;
3723 /* Init IA32_MCi_CTL to all 1s */
3724 for (bank = 0; bank < bank_num; bank++)
3725 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
c45dcc71 3726
92735b1b 3727 kvm_x86_ops->setup_mce(vcpu);
3728out:
3729 return r;
3730}
3731
3732static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
3733 struct kvm_x86_mce *mce)
3734{
3735 u64 mcg_cap = vcpu->arch.mcg_cap;
3736 unsigned bank_num = mcg_cap & 0xff;
3737 u64 *banks = vcpu->arch.mce_banks;
3738
3739 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
3740 return -EINVAL;
3741 /*
3742 * if IA32_MCG_CTL is not all 1s, the uncorrected error
3743 * reporting is disabled
3744 */
3745 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
3746 vcpu->arch.mcg_ctl != ~(u64)0)
3747 return 0;
3748 banks += 4 * mce->bank;
3749 /*
3750 * if IA32_MCi_CTL is not all 1s, the uncorrected error
3751 * reporting is disabled for the bank
3752 */
3753 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
3754 return 0;
3755 if (mce->status & MCI_STATUS_UC) {
3756 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
fc78f519 3757 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
a8eeb04a 3758 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3759 return 0;
3760 }
3761 if (banks[1] & MCI_STATUS_VAL)
3762 mce->status |= MCI_STATUS_OVER;
3763 banks[2] = mce->addr;
3764 banks[3] = mce->misc;
3765 vcpu->arch.mcg_status = mce->mcg_status;
3766 banks[1] = mce->status;
3767 kvm_queue_exception(vcpu, MC_VECTOR);
3768 } else if (!(banks[1] & MCI_STATUS_VAL)
3769 || !(banks[1] & MCI_STATUS_UC)) {
3770 if (banks[1] & MCI_STATUS_VAL)
3771 mce->status |= MCI_STATUS_OVER;
3772 banks[2] = mce->addr;
3773 banks[3] = mce->misc;
3774 banks[1] = mce->status;
3775 } else
3776 banks[1] |= MCI_STATUS_OVER;
3777 return 0;
3778}
3779
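/*
 * Layout assumed by the bank arithmetic above: vcpu->arch.mce_banks is a
 * flat u64 array holding four registers per bank, so for bank n:
 *
 *   mce_banks[4 * n + 0]  MCi_CTL    (set to all 1s by setup_mce)
 *   mce_banks[4 * n + 1]  MCi_STATUS (banks[1] after banks += 4 * mce->bank)
 *   mce_banks[4 * n + 2]  MCi_ADDR   (banks[2])
 *   mce_banks[4 * n + 3]  MCi_MISC   (banks[3])
 */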
3780static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
3781 struct kvm_vcpu_events *events)
3782{
7460fb4a 3783 process_nmi(vcpu);
59073aaf 3784
664f8e26 3785 /*
3786 * The API doesn't provide the instruction length for software
3787 * exceptions, so don't report them. As long as the guest RIP
3788 * isn't advanced, we should expect to encounter the exception
3789 * again.
664f8e26 3790 */
3791 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
3792 events->exception.injected = 0;
3793 events->exception.pending = 0;
3794 } else {
3795 events->exception.injected = vcpu->arch.exception.injected;
3796 events->exception.pending = vcpu->arch.exception.pending;
3797 /*
3798 * For ABI compatibility, deliberately conflate
3799 * pending and injected exceptions when
3800 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
3801 */
3802 if (!vcpu->kvm->arch.exception_payload_enabled)
3803 events->exception.injected |=
3804 vcpu->arch.exception.pending;
3805 }
3806 events->exception.nr = vcpu->arch.exception.nr;
3807 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
3808 events->exception.error_code = vcpu->arch.exception.error_code;
3809 events->exception_has_payload = vcpu->arch.exception.has_payload;
3810 events->exception_payload = vcpu->arch.exception.payload;
3cfc3092 3811
03b82a30 3812 events->interrupt.injected =
04140b41 3813 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
3cfc3092 3814 events->interrupt.nr = vcpu->arch.interrupt.nr;
03b82a30 3815 events->interrupt.soft = 0;
37ccdcbe 3816 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
3817
3818 events->nmi.injected = vcpu->arch.nmi_injected;
7460fb4a 3819 events->nmi.pending = vcpu->arch.nmi_pending != 0;
3cfc3092 3820 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
97e69aa6 3821 events->nmi.pad = 0;
3cfc3092 3822
66450a21 3823 events->sipi_vector = 0; /* never valid when reporting to user space */
3cfc3092 3824
3825 events->smi.smm = is_smm(vcpu);
3826 events->smi.pending = vcpu->arch.smi_pending;
3827 events->smi.smm_inside_nmi =
3828 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
3829 events->smi.latched_init = kvm_lapic_latched_init(vcpu);
3830
dab4b911 3831 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
3832 | KVM_VCPUEVENT_VALID_SHADOW
3833 | KVM_VCPUEVENT_VALID_SMM);
3834 if (vcpu->kvm->arch.exception_payload_enabled)
3835 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
3836
97e69aa6 3837 memset(&events->reserved, 0, sizeof(events->reserved));
3838}
3839
c5833c7a 3840static void kvm_smm_changed(struct kvm_vcpu *vcpu);
6ef4e07e 3841
3842static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
3843 struct kvm_vcpu_events *events)
3844{
dab4b911 3845 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
48005f64 3846 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
f077825a 3847 | KVM_VCPUEVENT_VALID_SHADOW
3848 | KVM_VCPUEVENT_VALID_SMM
3849 | KVM_VCPUEVENT_VALID_PAYLOAD))
3850 return -EINVAL;
3851
3852 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
3853 if (!vcpu->kvm->arch.exception_payload_enabled)
3854 return -EINVAL;
3855 if (events->exception.pending)
3856 events->exception.injected = 0;
3857 else
3858 events->exception_has_payload = 0;
3859 } else {
3860 events->exception.pending = 0;
3861 events->exception_has_payload = 0;
3862 }
3863
3864 if ((events->exception.injected || events->exception.pending) &&
3865 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
3866 return -EINVAL;
3867
3868 /* INITs are latched while in SMM */
3869 if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
3870 (events->smi.smm || events->smi.pending) &&
3871 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
3872 return -EINVAL;
3873
7460fb4a 3874 process_nmi(vcpu);
3875 vcpu->arch.exception.injected = events->exception.injected;
3876 vcpu->arch.exception.pending = events->exception.pending;
3877 vcpu->arch.exception.nr = events->exception.nr;
3878 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
3879 vcpu->arch.exception.error_code = events->exception.error_code;
3880 vcpu->arch.exception.has_payload = events->exception_has_payload;
3881 vcpu->arch.exception.payload = events->exception_payload;
3cfc3092 3882
04140b41 3883 vcpu->arch.interrupt.injected = events->interrupt.injected;
3884 vcpu->arch.interrupt.nr = events->interrupt.nr;
3885 vcpu->arch.interrupt.soft = events->interrupt.soft;
3886 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
3887 kvm_x86_ops->set_interrupt_shadow(vcpu,
3888 events->interrupt.shadow);
3889
3890 vcpu->arch.nmi_injected = events->nmi.injected;
3891 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
3892 vcpu->arch.nmi_pending = events->nmi.pending;
3893 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
3894
66450a21 3895 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
bce87cce 3896 lapic_in_kernel(vcpu))
66450a21 3897 vcpu->arch.apic->sipi_vector = events->sipi_vector;
3cfc3092 3898
f077825a 3899 if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
3900 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
3901 if (events->smi.smm)
3902 vcpu->arch.hflags |= HF_SMM_MASK;
3903 else
3904 vcpu->arch.hflags &= ~HF_SMM_MASK;
3905 kvm_smm_changed(vcpu);
3906 }
6ef4e07e 3907
f077825a 3908 vcpu->arch.smi_pending = events->smi.pending;
3909
3910 if (events->smi.smm) {
3911 if (events->smi.smm_inside_nmi)
3912 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
f077825a 3913 else
f4ef1910 3914 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
3915 }
3916
3917 if (lapic_in_kernel(vcpu)) {
3918 if (events->smi.latched_init)
3919 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3920 else
3921 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3922 }
3923 }
3924
3925 kvm_make_request(KVM_REQ_EVENT, vcpu);
3926
3927 return 0;
3928}
3929
3930static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
3931 struct kvm_debugregs *dbgregs)
3932{
3933 unsigned long val;
3934
a1efbe77 3935 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
16f8a6f9 3936 kvm_get_dr(vcpu, 6, &val);
73aaf249 3937 dbgregs->dr6 = val;
3938 dbgregs->dr7 = vcpu->arch.dr7;
3939 dbgregs->flags = 0;
97e69aa6 3940 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
3941}
3942
3943static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
3944 struct kvm_debugregs *dbgregs)
3945{
3946 if (dbgregs->flags)
3947 return -EINVAL;
3948
3949 if (dbgregs->dr6 & ~0xffffffffull)
3950 return -EINVAL;
3951 if (dbgregs->dr7 & ~0xffffffffull)
3952 return -EINVAL;
3953
a1efbe77 3954 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
ae561ede 3955 kvm_update_dr0123(vcpu);
a1efbe77 3956 vcpu->arch.dr6 = dbgregs->dr6;
73aaf249 3957 kvm_update_dr6(vcpu);
a1efbe77 3958 vcpu->arch.dr7 = dbgregs->dr7;
9926c9fd 3959 kvm_update_dr7(vcpu);
a1efbe77 3960
3961 return 0;
3962}
3963
3964#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
3965
3966static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
3967{
b666a4b6 3968 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
400e4b20 3969 u64 xstate_bv = xsave->header.xfeatures;
3970 u64 valid;
3971
3972 /*
3973 * Copy legacy XSAVE area, to avoid complications with CPUID
3974 * leaves 0 and 1 in the loop below.
3975 */
3976 memcpy(dest, xsave, XSAVE_HDR_OFFSET);
3977
3978 /* Set XSTATE_BV */
00c87e9a 3979 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
3980 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
3981
3982 /*
3983 * Copy each region from the possibly compacted offset to the
3984 * non-compacted offset.
3985 */
d91cab78 3986 valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
df1daba7 3987 while (valid) {
3988 u64 xfeature_mask = valid & -valid;
3989 int xfeature_nr = fls64(xfeature_mask) - 1;
3990 void *src = get_xsave_addr(xsave, xfeature_nr);
3991
3992 if (src) {
3993 u32 size, offset, ecx, edx;
abd16d68 3994 cpuid_count(XSTATE_CPUID, xfeature_nr,
df1daba7 3995 &size, &offset, &ecx, &edx);
abd16d68 3996 if (xfeature_nr == XFEATURE_PKRU)
3997 memcpy(dest + offset, &vcpu->arch.pkru,
3998 sizeof(vcpu->arch.pkru));
3999 else
4000 memcpy(dest + offset, src, size);
4001
4002 }
4003
abd16d68 4004 valid -= xfeature_mask;
4005 }
4006}
4007
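Both copy loops (fill_xsave() above and load_xsave() below) peel one feature at a time off the bitmask: valid & -valid isolates the lowest set bit and fls64() - 1 converts it back to a feature number. The trick in isolation (a standalone sketch; fls64() comes from linux/bitops.h):

static int highest_xfeature(u64 mask)
{
	int nr = -1;

	while (mask) {
		u64 bit = mask & -mask;		/* lowest set bit */

		nr = fls64(bit) - 1;		/* its feature number */
		mask -= bit;			/* clear it and continue */
	}
	return nr;
}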
4008static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
4009{
b666a4b6 4010 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
4011 u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
4012 u64 valid;
4013
4014 /*
4015 * Copy legacy XSAVE area, to avoid complications with CPUID
4016 * leaves 0 and 1 in the loop below.
4017 */
4018 memcpy(xsave, src, XSAVE_HDR_OFFSET);
4019
4020 /* Set XSTATE_BV and possibly XCOMP_BV. */
400e4b20 4021 xsave->header.xfeatures = xstate_bv;
782511b0 4022 if (boot_cpu_has(X86_FEATURE_XSAVES))
3a54450b 4023 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
4024
4025 /*
4026 * Copy each region from the non-compacted offset to the
4027 * possibly compacted offset.
4028 */
d91cab78 4029 valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
df1daba7 4030 while (valid) {
4031 u64 xfeature_mask = valid & -valid;
4032 int xfeature_nr = fls64(xfeature_mask) - 1;
4033 void *dest = get_xsave_addr(xsave, xfeature_nr);
4034
4035 if (dest) {
4036 u32 size, offset, ecx, edx;
abd16d68 4037 cpuid_count(XSTATE_CPUID, xfeature_nr,
df1daba7 4038 &size, &offset, &ecx, &edx);
abd16d68 4039 if (xfeature_nr == XFEATURE_PKRU)
4040 memcpy(&vcpu->arch.pkru, src + offset,
4041 sizeof(vcpu->arch.pkru));
4042 else
4043 memcpy(dest, src + offset, size);
ee4100da 4044 }
df1daba7 4045
abd16d68 4046 valid -= xfeature_mask;
4047 }
4048}
4049
4050static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
4051 struct kvm_xsave *guest_xsave)
4052{
d366bf7e 4053 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
4054 memset(guest_xsave, 0, sizeof(struct kvm_xsave));
4055 fill_xsave((u8 *) guest_xsave->region, vcpu);
4344ee98 4056 } else {
2d5b5a66 4057 memcpy(guest_xsave->region,
b666a4b6 4058 &vcpu->arch.guest_fpu->state.fxsave,
c47ada30 4059 sizeof(struct fxregs_state));
2d5b5a66 4060 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
d91cab78 4061 XFEATURE_MASK_FPSSE;
4062 }
4063}
4064
4065#define XSAVE_MXCSR_OFFSET 24
4066
4067static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
4068 struct kvm_xsave *guest_xsave)
4069{
4070 u64 xstate_bv =
4071 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
a575813b 4072 u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
2d5b5a66 4073
d366bf7e 4074 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
4075 /*
4076 * Here we allow setting states that are not present in
4077 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
4078 * with old userspace.
4079 */
4080 if (xstate_bv & ~kvm_supported_xcr0() ||
4081 mxcsr & ~mxcsr_feature_mask)
d7876f1b 4082 return -EINVAL;
df1daba7 4083 load_xsave(vcpu, (u8 *)guest_xsave->region);
d7876f1b 4084 } else {
4085 if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
4086 mxcsr & ~mxcsr_feature_mask)
2d5b5a66 4087 return -EINVAL;
b666a4b6 4088 memcpy(&vcpu->arch.guest_fpu->state.fxsave,
c47ada30 4089 guest_xsave->region, sizeof(struct fxregs_state));
4090 }
4091 return 0;
4092}
4093
4094static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
4095 struct kvm_xcrs *guest_xcrs)
4096{
d366bf7e 4097 if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
4098 guest_xcrs->nr_xcrs = 0;
4099 return;
4100 }
4101
4102 guest_xcrs->nr_xcrs = 1;
4103 guest_xcrs->flags = 0;
4104 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
4105 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
4106}
4107
4108static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
4109 struct kvm_xcrs *guest_xcrs)
4110{
4111 int i, r = 0;
4112
d366bf7e 4113 if (!boot_cpu_has(X86_FEATURE_XSAVE))
4114 return -EINVAL;
4115
4116 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
4117 return -EINVAL;
4118
4119 for (i = 0; i < guest_xcrs->nr_xcrs; i++)
4120 /* Only support XCR0 currently */
c67a04cb 4121 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
2d5b5a66 4122 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
c67a04cb 4123 guest_xcrs->xcrs[i].value);
4124 break;
4125 }
4126 if (r)
4127 r = -EINVAL;
4128 return r;
4129}
4130
4131/*
4132 * kvm_set_guest_paused() indicates to the guest kernel that it has been
4133 * stopped by the hypervisor. This function will be called from the host only.
4134 * EINVAL is returned when the host attempts to set the flag for a guest that
4135 * does not support pv clocks.
4136 */
4137static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
4138{
0b79459b 4139 if (!vcpu->arch.pv_time_enabled)
1c0b28c2 4140 return -EINVAL;
51d59c6b 4141 vcpu->arch.pvclock_set_guest_stopped_request = true;
4142 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4143 return 0;
4144}
4145
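Userspace (e.g. a VMM resuming a guest after a debugger stop) triggers this via the KVM_KVMCLOCK_CTRL vcpu ioctl, which takes no argument. A one-line sketch:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Tell kvmclock the stop was deliberate so guest watchdogs don't fire. */
int mark_vcpu_paused(int vcpu_fd)
{
	return ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0);
}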
4146static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4147 struct kvm_enable_cap *cap)
4148{
4149 int r;
4150 uint16_t vmcs_version;
4151 void __user *user_ptr;
4152
4153 if (cap->flags)
4154 return -EINVAL;
4155
4156 switch (cap->cap) {
4157 case KVM_CAP_HYPERV_SYNIC2:
4158 if (cap->args[0])
4159 return -EINVAL;
4160 /* fall through */
4161
5c919412 4162 case KVM_CAP_HYPERV_SYNIC:
4163 if (!irqchip_in_kernel(vcpu->kvm))
4164 return -EINVAL;
4165 return kvm_hv_activate_synic(vcpu, cap->cap ==
4166 KVM_CAP_HYPERV_SYNIC2);
57b119da 4167 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
4168 if (!kvm_x86_ops->nested_enable_evmcs)
4169 return -ENOTTY;
4170 r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version);
4171 if (!r) {
4172 user_ptr = (void __user *)(uintptr_t)cap->args[0];
4173 if (copy_to_user(user_ptr, &vmcs_version,
4174 sizeof(vmcs_version)))
4175 r = -EFAULT;
4176 }
4177 return r;
4178 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
4179 if (!kvm_x86_ops->enable_direct_tlbflush)
4180 return -ENOTTY;
4181
4182 return kvm_x86_ops->enable_direct_tlbflush(vcpu);
57b119da 4183
4184 default:
4185 return -EINVAL;
4186 }
4187}
4188
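These per-vcpu capabilities are turned on with the KVM_ENABLE_CAP ioctl on the vcpu fd. A minimal sketch for KVM_CAP_HYPERV_SYNIC2 (args[0] must be zero, as checked above):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int enable_synic2(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_HYPERV_SYNIC2;
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}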
4189long kvm_arch_vcpu_ioctl(struct file *filp,
4190 unsigned int ioctl, unsigned long arg)
4191{
4192 struct kvm_vcpu *vcpu = filp->private_data;
4193 void __user *argp = (void __user *)arg;
4194 int r;
4195 union {
4196 struct kvm_lapic_state *lapic;
4197 struct kvm_xsave *xsave;
4198 struct kvm_xcrs *xcrs;
4199 void *buffer;
4200 } u;
4201
4202 vcpu_load(vcpu);
4203
d1ac91d8 4204 u.buffer = NULL;
4205 switch (ioctl) {
4206 case KVM_GET_LAPIC: {
2204ae3c 4207 r = -EINVAL;
bce87cce 4208 if (!lapic_in_kernel(vcpu))
2204ae3c 4209 goto out;
4210 u.lapic = kzalloc(sizeof(struct kvm_lapic_state),
4211 GFP_KERNEL_ACCOUNT);
313a3dc7 4212
b772ff36 4213 r = -ENOMEM;
d1ac91d8 4214 if (!u.lapic)
b772ff36 4215 goto out;
d1ac91d8 4216 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
4217 if (r)
4218 goto out;
4219 r = -EFAULT;
d1ac91d8 4220 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
4221 goto out;
4222 r = 0;
4223 break;
4224 }
4225 case KVM_SET_LAPIC: {
2204ae3c 4226 r = -EINVAL;
bce87cce 4227 if (!lapic_in_kernel(vcpu))
2204ae3c 4228 goto out;
ff5c2c03 4229 u.lapic = memdup_user(argp, sizeof(*u.lapic));
4230 if (IS_ERR(u.lapic)) {
4231 r = PTR_ERR(u.lapic);
4232 goto out_nofree;
4233 }
ff5c2c03 4234
d1ac91d8 4235 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
4236 break;
4237 }
4238 case KVM_INTERRUPT: {
4239 struct kvm_interrupt irq;
4240
4241 r = -EFAULT;
0e96f31e 4242 if (copy_from_user(&irq, argp, sizeof(irq)))
4243 goto out;
4244 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
4245 break;
4246 }
4247 case KVM_NMI: {
4248 r = kvm_vcpu_ioctl_nmi(vcpu);
4249 break;
4250 }
4251 case KVM_SMI: {
4252 r = kvm_vcpu_ioctl_smi(vcpu);
4253 break;
4254 }
4255 case KVM_SET_CPUID: {
4256 struct kvm_cpuid __user *cpuid_arg = argp;
4257 struct kvm_cpuid cpuid;
4258
4259 r = -EFAULT;
0e96f31e 4260 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4261 goto out;
4262 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
4263 break;
4264 }
4265 case KVM_SET_CPUID2: {
4266 struct kvm_cpuid2 __user *cpuid_arg = argp;
4267 struct kvm_cpuid2 cpuid;
4268
4269 r = -EFAULT;
0e96f31e 4270 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4271 goto out;
4272 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
19355475 4273 cpuid_arg->entries);
4274 break;
4275 }
4276 case KVM_GET_CPUID2: {
4277 struct kvm_cpuid2 __user *cpuid_arg = argp;
4278 struct kvm_cpuid2 cpuid;
4279
4280 r = -EFAULT;
0e96f31e 4281 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4282 goto out;
4283 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
19355475 4284 cpuid_arg->entries);
4285 if (r)
4286 goto out;
4287 r = -EFAULT;
0e96f31e 4288 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4289 goto out;
4290 r = 0;
4291 break;
4292 }
4293 case KVM_GET_MSRS: {
4294 int idx = srcu_read_lock(&vcpu->kvm->srcu);
609e36d3 4295 r = msr_io(vcpu, argp, do_get_msr, 1);
801e459a 4296 srcu_read_unlock(&vcpu->kvm->srcu, idx);
313a3dc7 4297 break;
4298 }
4299 case KVM_SET_MSRS: {
4300 int idx = srcu_read_lock(&vcpu->kvm->srcu);
313a3dc7 4301 r = msr_io(vcpu, argp, do_set_msr, 0);
801e459a 4302 srcu_read_unlock(&vcpu->kvm->srcu, idx);
313a3dc7 4303 break;
801e459a 4304 }
4305 case KVM_TPR_ACCESS_REPORTING: {
4306 struct kvm_tpr_access_ctl tac;
4307
4308 r = -EFAULT;
0e96f31e 4309 if (copy_from_user(&tac, argp, sizeof(tac)))
4310 goto out;
4311 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
4312 if (r)
4313 goto out;
4314 r = -EFAULT;
0e96f31e 4315 if (copy_to_user(argp, &tac, sizeof(tac)))
4316 goto out;
4317 r = 0;
4318 break;
4319 };
4320 case KVM_SET_VAPIC_ADDR: {
4321 struct kvm_vapic_addr va;
7301d6ab 4322 int idx;
4323
4324 r = -EINVAL;
35754c98 4325 if (!lapic_in_kernel(vcpu))
4326 goto out;
4327 r = -EFAULT;
0e96f31e 4328 if (copy_from_user(&va, argp, sizeof(va)))
b93463aa 4329 goto out;
7301d6ab 4330 idx = srcu_read_lock(&vcpu->kvm->srcu);
fda4e2e8 4331 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
7301d6ab 4332 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4333 break;
4334 }
4335 case KVM_X86_SETUP_MCE: {
4336 u64 mcg_cap;
4337
4338 r = -EFAULT;
0e96f31e 4339 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
4340 goto out;
4341 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
4342 break;
4343 }
4344 case KVM_X86_SET_MCE: {
4345 struct kvm_x86_mce mce;
4346
4347 r = -EFAULT;
0e96f31e 4348 if (copy_from_user(&mce, argp, sizeof(mce)))
4349 goto out;
4350 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
4351 break;
4352 }
4353 case KVM_GET_VCPU_EVENTS: {
4354 struct kvm_vcpu_events events;
4355
4356 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
4357
4358 r = -EFAULT;
4359 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
4360 break;
4361 r = 0;
4362 break;
4363 }
4364 case KVM_SET_VCPU_EVENTS: {
4365 struct kvm_vcpu_events events;
4366
4367 r = -EFAULT;
4368 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
4369 break;
4370
4371 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
4372 break;
4373 }
4374 case KVM_GET_DEBUGREGS: {
4375 struct kvm_debugregs dbgregs;
4376
4377 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
4378
4379 r = -EFAULT;
4380 if (copy_to_user(argp, &dbgregs,
4381 sizeof(struct kvm_debugregs)))
4382 break;
4383 r = 0;
4384 break;
4385 }
4386 case KVM_SET_DEBUGREGS: {
4387 struct kvm_debugregs dbgregs;
4388
4389 r = -EFAULT;
4390 if (copy_from_user(&dbgregs, argp,
4391 sizeof(struct kvm_debugregs)))
4392 break;
4393
4394 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
4395 break;
4396 }
2d5b5a66 4397 case KVM_GET_XSAVE: {
254272ce 4398 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT);
2d5b5a66 4399 r = -ENOMEM;
d1ac91d8 4400 if (!u.xsave)
4401 break;
4402
d1ac91d8 4403 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
4404
4405 r = -EFAULT;
d1ac91d8 4406 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
4407 break;
4408 r = 0;
4409 break;
4410 }
4411 case KVM_SET_XSAVE: {
ff5c2c03 4412 u.xsave = memdup_user(argp, sizeof(*u.xsave));
4413 if (IS_ERR(u.xsave)) {
4414 r = PTR_ERR(u.xsave);
4415 goto out_nofree;
4416 }
2d5b5a66 4417
d1ac91d8 4418 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
4419 break;
4420 }
4421 case KVM_GET_XCRS: {
254272ce 4422 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT);
2d5b5a66 4423 r = -ENOMEM;
d1ac91d8 4424 if (!u.xcrs)
2d5b5a66
SY
4425 break;
4426
d1ac91d8 4427 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
4428
4429 r = -EFAULT;
d1ac91d8 4430 if (copy_to_user(argp, u.xcrs,
4431 sizeof(struct kvm_xcrs)))
4432 break;
4433 r = 0;
4434 break;
4435 }
4436 case KVM_SET_XCRS: {
ff5c2c03 4437 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
4438 if (IS_ERR(u.xcrs)) {
4439 r = PTR_ERR(u.xcrs);
4440 goto out_nofree;
4441 }
2d5b5a66 4442
d1ac91d8 4443 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
4444 break;
4445 }
4446 case KVM_SET_TSC_KHZ: {
4447 u32 user_tsc_khz;
4448
4449 r = -EINVAL;
4450 user_tsc_khz = (u32)arg;
4451
4452 if (user_tsc_khz >= kvm_max_guest_tsc_khz)
4453 goto out;
4454
4455 if (user_tsc_khz == 0)
4456 user_tsc_khz = tsc_khz;
4457
4458 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
4459 r = 0;
92a1f12d 4460
4461 goto out;
4462 }
4463 case KVM_GET_TSC_KHZ: {
cc578287 4464 r = vcpu->arch.virtual_tsc_khz;
4465 goto out;
4466 }
4467 case KVM_KVMCLOCK_CTRL: {
4468 r = kvm_set_guest_paused(vcpu);
4469 goto out;
4470 }
4471 case KVM_ENABLE_CAP: {
4472 struct kvm_enable_cap cap;
4473
4474 r = -EFAULT;
4475 if (copy_from_user(&cap, argp, sizeof(cap)))
4476 goto out;
4477 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4478 break;
4479 }
4480 case KVM_GET_NESTED_STATE: {
4481 struct kvm_nested_state __user *user_kvm_nested_state = argp;
4482 u32 user_data_size;
4483
4484 r = -EINVAL;
4485 if (!kvm_x86_ops->get_nested_state)
4486 break;
4487
4488 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
26b471c7 4489 r = -EFAULT;
8fcc4b59 4490 if (get_user(user_data_size, &user_kvm_nested_state->size))
26b471c7 4491 break;
4492
4493 r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
4494 user_data_size);
4495 if (r < 0)
26b471c7 4496 break;
4497
4498 if (r > user_data_size) {
4499 if (put_user(r, &user_kvm_nested_state->size))
4500 r = -EFAULT;
4501 else
4502 r = -E2BIG;
4503 break;
8fcc4b59 4504 }
26b471c7 4505
4506 r = 0;
4507 break;
4508 }
4509 case KVM_SET_NESTED_STATE: {
4510 struct kvm_nested_state __user *user_kvm_nested_state = argp;
4511 struct kvm_nested_state kvm_state;
ad5996d9 4512 int idx;
4513
4514 r = -EINVAL;
4515 if (!kvm_x86_ops->set_nested_state)
4516 break;
4517
26b471c7 4518 r = -EFAULT;
8fcc4b59 4519 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
26b471c7 4520 break;
8fcc4b59 4521
26b471c7 4522 r = -EINVAL;
8fcc4b59 4523 if (kvm_state.size < sizeof(kvm_state))
26b471c7 4524 break;
4525
4526 if (kvm_state.flags &
4527 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
4528 | KVM_STATE_NESTED_EVMCS))
26b471c7 4529 break;
4530
4531 /* nested_run_pending implies guest_mode. */
4532 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
4533 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
26b471c7 4534 break;
8fcc4b59 4535
ad5996d9 4536 idx = srcu_read_lock(&vcpu->kvm->srcu);
8fcc4b59 4537 r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
ad5996d9 4538 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4539 break;
4540 }
4541 case KVM_GET_SUPPORTED_HV_CPUID: {
4542 struct kvm_cpuid2 __user *cpuid_arg = argp;
4543 struct kvm_cpuid2 cpuid;
4544
4545 r = -EFAULT;
4546 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4547 goto out;
4548
4549 r = kvm_vcpu_ioctl_get_hv_cpuid(vcpu, &cpuid,
4550 cpuid_arg->entries);
4551 if (r)
4552 goto out;
4553
4554 r = -EFAULT;
4555 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4556 goto out;
4557 r = 0;
4558 break;
4559 }
4560 default:
4561 r = -EINVAL;
4562 }
4563out:
d1ac91d8 4564 kfree(u.buffer);
4565out_nofree:
4566 vcpu_put(vcpu);
4567 return r;
4568}
4569
1499fa80 4570vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
4571{
4572 return VM_FAULT_SIGBUS;
4573}
4574
4575static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
4576{
4577 int ret;
4578
4579 if (addr > (unsigned int)(-3 * PAGE_SIZE))
951179ce 4580 return -EINVAL;
4581 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
4582 return ret;
4583}
4584
4585static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
4586 u64 ident_addr)
4587{
2ac52ab8 4588 return kvm_x86_ops->set_identity_map_addr(kvm, ident_addr);
4589}
4590
1fe779f8 4591static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
bc8a3d89 4592 unsigned long kvm_nr_mmu_pages)
4593{
4594 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
4595 return -EINVAL;
4596
79fac95e 4597 mutex_lock(&kvm->slots_lock);
4598
4599 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
f05e70ac 4600 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
1fe779f8 4601
79fac95e 4602 mutex_unlock(&kvm->slots_lock);
4603 return 0;
4604}
4605
bc8a3d89 4606static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1fe779f8 4607{
39de71ec 4608 return kvm->arch.n_max_mmu_pages;
4609}
4610
4611static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
4612{
90bca052 4613 struct kvm_pic *pic = kvm->arch.vpic;
4614 int r;
4615
4616 r = 0;
4617 switch (chip->chip_id) {
4618 case KVM_IRQCHIP_PIC_MASTER:
90bca052 4619 memcpy(&chip->chip.pic, &pic->pics[0],
4620 sizeof(struct kvm_pic_state));
4621 break;
4622 case KVM_IRQCHIP_PIC_SLAVE:
90bca052 4623 memcpy(&chip->chip.pic, &pic->pics[1],
4624 sizeof(struct kvm_pic_state));
4625 break;
4626 case KVM_IRQCHIP_IOAPIC:
33392b49 4627 kvm_get_ioapic(kvm, &chip->chip.ioapic);
4628 break;
4629 default:
4630 r = -EINVAL;
4631 break;
4632 }
4633 return r;
4634}
4635
4636static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
4637{
90bca052 4638 struct kvm_pic *pic = kvm->arch.vpic;
4639 int r;
4640
4641 r = 0;
4642 switch (chip->chip_id) {
4643 case KVM_IRQCHIP_PIC_MASTER:
4644 spin_lock(&pic->lock);
4645 memcpy(&pic->pics[0], &chip->chip.pic,
1fe779f8 4646 sizeof(struct kvm_pic_state));
90bca052 4647 spin_unlock(&pic->lock);
4648 break;
4649 case KVM_IRQCHIP_PIC_SLAVE:
4650 spin_lock(&pic->lock);
4651 memcpy(&pic->pics[1], &chip->chip.pic,
1fe779f8 4652 sizeof(struct kvm_pic_state));
90bca052 4653 spin_unlock(&pic->lock);
4654 break;
4655 case KVM_IRQCHIP_IOAPIC:
33392b49 4656 kvm_set_ioapic(kvm, &chip->chip.ioapic);
4657 break;
4658 default:
4659 r = -EINVAL;
4660 break;
4661 }
90bca052 4662 kvm_pic_update_irq(pic);
4663 return r;
4664}
4665
4666static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
4667{
4668 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
4669
4670 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
4671
4672 mutex_lock(&kps->lock);
4673 memcpy(ps, &kps->channels, sizeof(*ps));
4674 mutex_unlock(&kps->lock);
2da29bcc 4675 return 0;
4676}
4677
4678static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
4679{
0185604c 4680 int i;
4681 struct kvm_pit *pit = kvm->arch.vpit;
4682
4683 mutex_lock(&pit->pit_state.lock);
34f3941c 4684 memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
0185604c 4685 for (i = 0; i < 3; i++)
4686 kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
4687 mutex_unlock(&pit->pit_state.lock);
2da29bcc 4688 return 0;
4689}
4690
4691static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
4692{
4693 mutex_lock(&kvm->arch.vpit->pit_state.lock);
4694 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
4695 sizeof(ps->channels));
4696 ps->flags = kvm->arch.vpit->pit_state.flags;
4697 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
97e69aa6 4698 memset(&ps->reserved, 0, sizeof(ps->reserved));
2da29bcc 4699 return 0;
4700}
4701
4702static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
4703{
2da29bcc 4704 int start = 0;
0185604c 4705 int i;
e9f42757 4706 u32 prev_legacy, cur_legacy;
4707 struct kvm_pit *pit = kvm->arch.vpit;
4708
4709 mutex_lock(&pit->pit_state.lock);
4710 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
4711 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
4712 if (!prev_legacy && cur_legacy)
4713 start = 1;
4714 memcpy(&pit->pit_state.channels, &ps->channels,
4715 sizeof(pit->pit_state.channels));
4716 pit->pit_state.flags = ps->flags;
0185604c 4717 for (i = 0; i < 3; i++)
09edea72 4718 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
e5e57e7a 4719 start && i == 0);
09edea72 4720 mutex_unlock(&pit->pit_state.lock);
2da29bcc 4721 return 0;
4722}
4723
4724static int kvm_vm_ioctl_reinject(struct kvm *kvm,
4725 struct kvm_reinject_control *control)
4726{
4727 struct kvm_pit *pit = kvm->arch.vpit;
4728
4729 /* pit->pit_state.lock was overloaded to prevent userspace from getting
4730 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
4731 * ioctls in parallel. Use a separate lock if that ioctl isn't rare.
4732 */
4733 mutex_lock(&pit->pit_state.lock);
4734 kvm_pit_set_reinject(pit, control->pit_reinject);
4735 mutex_unlock(&pit->pit_state.lock);
b39c90b6 4736
4737 return 0;
4738}
4739
95d4c16c 4740/**
4741 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
4742 * @kvm: kvm instance
4743 * @log: slot id and address to which we copy the log
95d4c16c 4744 *
4745 * Steps 1-4 below provide a general overview of dirty page logging. See
4746 * kvm_get_dirty_log_protect() function description for additional details.
4747 *
4748 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
4749 * always flush the TLB (step 4) even if previous step failed and the dirty
4750 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
4751 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
4752 * writes will be marked dirty for next log read.
95d4c16c 4753 *
60c34612
TY
4754 * 1. Take a snapshot of the bit and clear it if needed.
4755 * 2. Write protect the corresponding page.
4756 * 3. Copy the snapshot to the userspace.
4757 * 4. Flush TLB's if needed.
5bb064dc 4758 */
60c34612 4759int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
5bb064dc 4760{
8fe65a82 4761 bool flush = false;
e108ff2f 4762 int r;
5bb064dc 4763
79fac95e 4764 mutex_lock(&kvm->slots_lock);
5bb064dc 4765
4766 /*
4767 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
4768 */
4769 if (kvm_x86_ops->flush_log_dirty)
4770 kvm_x86_ops->flush_log_dirty(kvm);
4771
8fe65a82 4772 r = kvm_get_dirty_log_protect(kvm, log, &flush);
4773
4774 /*
4775 * All the TLBs can be flushed out of mmu lock, see the comments in
4776 * kvm_mmu_slot_remove_write_access().
4777 */
e108ff2f 4778 lockdep_assert_held(&kvm->slots_lock);
8fe65a82 4779 if (flush)
4780 kvm_flush_remote_tlbs(kvm);
4781
4782 mutex_unlock(&kvm->slots_lock);
4783 return r;
4784}
4785
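A minimal userspace sketch of the matching KVM_GET_DIRTY_LOG call; the caller supplies a bitmap with one bit per page in the memslot, rounded up to 64-bit words (the KVM_CLEAR_DIRTY_LOG variant below adds a first_page/num_pages window on top of this):

#include <sys/ioctl.h>
#include <linux/kvm.h>

int fetch_dirty_bitmap(int vm_fd, __u32 slot, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}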
4786int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
4787{
4788 bool flush = false;
4789 int r;
4790
4791 mutex_lock(&kvm->slots_lock);
4792
4793 /*
4794 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
4795 */
4796 if (kvm_x86_ops->flush_log_dirty)
4797 kvm_x86_ops->flush_log_dirty(kvm);
4798
4799 r = kvm_clear_dirty_log_protect(kvm, log, &flush);
4800
4801 /*
4802 * All the TLBs can be flushed out of mmu lock, see the comments in
4803 * kvm_mmu_slot_remove_write_access().
4804 */
4805 lockdep_assert_held(&kvm->slots_lock);
4806 if (flush)
4807 kvm_flush_remote_tlbs(kvm);
4808
79fac95e 4809 mutex_unlock(&kvm->slots_lock);
4810 return r;
4811}
4812
4813int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
4814 bool line_status)
4815{
4816 if (!irqchip_in_kernel(kvm))
4817 return -ENXIO;
4818
4819 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
4820 irq_event->irq, irq_event->level,
4821 line_status);
4822 return 0;
4823}
4824
4825int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4826 struct kvm_enable_cap *cap)
4827{
4828 int r;
4829
4830 if (cap->flags)
4831 return -EINVAL;
4832
4833 switch (cap->cap) {
4834 case KVM_CAP_DISABLE_QUIRKS:
4835 kvm->arch.disabled_quirks = cap->args[0];
4836 r = 0;
4837 break;
4838 case KVM_CAP_SPLIT_IRQCHIP: {
4839 mutex_lock(&kvm->lock);
4840 r = -EINVAL;
4841 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
4842 goto split_irqchip_unlock;
4843 r = -EEXIST;
4844 if (irqchip_in_kernel(kvm))
4845 goto split_irqchip_unlock;
557abc40 4846 if (kvm->created_vcpus)
4847 goto split_irqchip_unlock;
4848 r = kvm_setup_empty_irq_routing(kvm);
5c0aea0e 4849 if (r)
4850 goto split_irqchip_unlock;
4851 /* Pairs with irqchip_in_kernel. */
4852 smp_wmb();
49776faf 4853 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
b053b2ae 4854 kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
4855 r = 0;
4856split_irqchip_unlock:
4857 mutex_unlock(&kvm->lock);
4858 break;
4859 }
4860 case KVM_CAP_X2APIC_API:
4861 r = -EINVAL;
4862 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
4863 break;
4864
4865 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
4866 kvm->arch.x2apic_format = true;
4867 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
4868 kvm->arch.x2apic_broadcast_quirk_disabled = true;
4869
4870 r = 0;
4871 break;
4872 case KVM_CAP_X86_DISABLE_EXITS:
4873 r = -EINVAL;
4874 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
4875 break;
4876
4877 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
4878 kvm_can_mwait_in_guest())
4879 kvm->arch.mwait_in_guest = true;
766d3571 4880 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
caa057a2 4881 kvm->arch.hlt_in_guest = true;
4882 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
4883 kvm->arch.pause_in_guest = true;
4884 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
4885 kvm->arch.cstate_in_guest = true;
4886 r = 0;
4887 break;
4888 case KVM_CAP_MSR_PLATFORM_INFO:
4889 kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
4890 r = 0;
4891 break;
4892 case KVM_CAP_EXCEPTION_PAYLOAD:
4893 kvm->arch.exception_payload_enabled = cap->args[0];
4894 r = 0;
6fbbde9a 4895 break;
4896 default:
4897 r = -EINVAL;
4898 break;
4899 }
4900 return r;
4901}
4902
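The VM-wide counterpart is KVM_ENABLE_CAP on the VM fd. A sketch that asks KVM to stop intercepting HLT and PAUSE (per the handler above, the valid bits should first be probed via KVM_CHECK_EXTENSION on KVM_CAP_X86_DISABLE_EXITS):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int disable_hlt_pause_exits(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_X86_DISABLE_EXITS;
	cap.args[0] = KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}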
4903long kvm_arch_vm_ioctl(struct file *filp,
4904 unsigned int ioctl, unsigned long arg)
4905{
4906 struct kvm *kvm = filp->private_data;
4907 void __user *argp = (void __user *)arg;
367e1319 4908 int r = -ENOTTY;
4909 /*
4910 * This union makes it completely explicit to gcc-3.x
4911 * that these two variables' stack usage should be
4912 * combined, not added together.
4913 */
4914 union {
4915 struct kvm_pit_state ps;
e9f42757 4916 struct kvm_pit_state2 ps2;
c5ff41ce 4917 struct kvm_pit_config pit_config;
f0d66275 4918 } u;
4919
4920 switch (ioctl) {
4921 case KVM_SET_TSS_ADDR:
4922 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1fe779f8 4923 break;
4924 case KVM_SET_IDENTITY_MAP_ADDR: {
4925 u64 ident_addr;
4926
4927 mutex_lock(&kvm->lock);
4928 r = -EINVAL;
4929 if (kvm->created_vcpus)
4930 goto set_identity_unlock;
b927a3ce 4931 r = -EFAULT;
0e96f31e 4932 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
1af1ac91 4933 goto set_identity_unlock;
b927a3ce 4934 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
4935set_identity_unlock:
4936 mutex_unlock(&kvm->lock);
4937 break;
4938 }
4939 case KVM_SET_NR_MMU_PAGES:
4940 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
4941 break;
4942 case KVM_GET_NR_MMU_PAGES:
4943 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
4944 break;
3ddea128 4945 case KVM_CREATE_IRQCHIP: {
3ddea128 4946 mutex_lock(&kvm->lock);
09941366 4947
3ddea128 4948 r = -EEXIST;
35e6eaa3 4949 if (irqchip_in_kernel(kvm))
3ddea128 4950 goto create_irqchip_unlock;
09941366 4951
3e515705 4952 r = -EINVAL;
557abc40 4953 if (kvm->created_vcpus)
3e515705 4954 goto create_irqchip_unlock;
4955
4956 r = kvm_pic_init(kvm);
4957 if (r)
3ddea128 4958 goto create_irqchip_unlock;
4959
4960 r = kvm_ioapic_init(kvm);
4961 if (r) {
09941366 4962 kvm_pic_destroy(kvm);
3ddea128 4963 goto create_irqchip_unlock;
4964 }
4965
4966 r = kvm_setup_default_irq_routing(kvm);
4967 if (r) {
72bb2fcd 4968 kvm_ioapic_destroy(kvm);
09941366 4969 kvm_pic_destroy(kvm);
71ba994c 4970 goto create_irqchip_unlock;
399ec807 4971 }
49776faf 4972 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
71ba994c 4973 smp_wmb();
49776faf 4974 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
4975 create_irqchip_unlock:
4976 mutex_unlock(&kvm->lock);
1fe779f8 4977 break;
3ddea128 4978 }
7837699f 4979 case KVM_CREATE_PIT:
4980 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
4981 goto create_pit;
4982 case KVM_CREATE_PIT2:
4983 r = -EFAULT;
4984 if (copy_from_user(&u.pit_config, argp,
4985 sizeof(struct kvm_pit_config)))
4986 goto out;
4987 create_pit:
250715a6 4988 mutex_lock(&kvm->lock);
4989 r = -EEXIST;
4990 if (kvm->arch.vpit)
4991 goto create_pit_unlock;
7837699f 4992 r = -ENOMEM;
c5ff41ce 4993 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
4994 if (kvm->arch.vpit)
4995 r = 0;
269e05e4 4996 create_pit_unlock:
250715a6 4997 mutex_unlock(&kvm->lock);
7837699f 4998 break;
1fe779f8
CO
4999 case KVM_GET_IRQCHIP: {
5000 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
ff5c2c03 5001 struct kvm_irqchip *chip;
1fe779f8 5002
ff5c2c03
SL
5003 chip = memdup_user(argp, sizeof(*chip));
5004 if (IS_ERR(chip)) {
5005 r = PTR_ERR(chip);
1fe779f8 5006 goto out;
ff5c2c03
SL
5007 }
5008
1fe779f8 5009 r = -ENXIO;
826da321 5010 if (!irqchip_kernel(kvm))
f0d66275
DH
5011 goto get_irqchip_out;
5012 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1fe779f8 5013 if (r)
f0d66275 5014 goto get_irqchip_out;
1fe779f8 5015 r = -EFAULT;
0e96f31e 5016 if (copy_to_user(argp, chip, sizeof(*chip)))
f0d66275 5017 goto get_irqchip_out;
1fe779f8 5018 r = 0;
f0d66275
DH
5019 get_irqchip_out:
5020 kfree(chip);
1fe779f8
CO
5021 break;
5022 }
5023 case KVM_SET_IRQCHIP: {
5024 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
ff5c2c03 5025 struct kvm_irqchip *chip;
1fe779f8 5026
ff5c2c03
SL
5027 chip = memdup_user(argp, sizeof(*chip));
5028 if (IS_ERR(chip)) {
5029 r = PTR_ERR(chip);
1fe779f8 5030 goto out;
ff5c2c03
SL
5031 }
5032
1fe779f8 5033 r = -ENXIO;
826da321 5034 if (!irqchip_kernel(kvm))
f0d66275
DH
5035 goto set_irqchip_out;
5036 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
f0d66275
DH
5037 set_irqchip_out:
5038 kfree(chip);
1fe779f8
CO
5039 break;
5040 }
e0f63cb9 5041 case KVM_GET_PIT: {
e0f63cb9 5042 r = -EFAULT;
f0d66275 5043 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
5044 goto out;
5045 r = -ENXIO;
5046 if (!kvm->arch.vpit)
5047 goto out;
f0d66275 5048 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
e0f63cb9
SY
5049 if (r)
5050 goto out;
5051 r = -EFAULT;
f0d66275 5052 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
5053 goto out;
5054 r = 0;
5055 break;
5056 }
5057 case KVM_SET_PIT: {
e0f63cb9 5058 r = -EFAULT;
0e96f31e 5059 if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
e0f63cb9
SY
5060 goto out;
5061 r = -ENXIO;
5062 if (!kvm->arch.vpit)
5063 goto out;
f0d66275 5064 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
e0f63cb9
SY
5065 break;
5066 }
e9f42757
BK
5067 case KVM_GET_PIT2: {
5068 r = -ENXIO;
5069 if (!kvm->arch.vpit)
5070 goto out;
5071 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
5072 if (r)
5073 goto out;
5074 r = -EFAULT;
5075 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
5076 goto out;
5077 r = 0;
5078 break;
5079 }
5080 case KVM_SET_PIT2: {
5081 r = -EFAULT;
5082 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
5083 goto out;
5084 r = -ENXIO;
5085 if (!kvm->arch.vpit)
5086 goto out;
5087 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
e9f42757
BK
5088 break;
5089 }
52d939a0
MT
5090 case KVM_REINJECT_CONTROL: {
5091 struct kvm_reinject_control control;
5092 r = -EFAULT;
5093 if (copy_from_user(&control, argp, sizeof(control)))
5094 goto out;
cad23e72
ML
5095 r = -ENXIO;
5096 if (!kvm->arch.vpit)
5097 goto out;
52d939a0 5098 r = kvm_vm_ioctl_reinject(kvm, &control);
52d939a0
MT
5099 break;
5100 }
d71ba788
PB
5101 case KVM_SET_BOOT_CPU_ID:
5102 r = 0;
5103 mutex_lock(&kvm->lock);
557abc40 5104 if (kvm->created_vcpus)
d71ba788
PB
5105 r = -EBUSY;
5106 else
5107 kvm->arch.bsp_vcpu_id = arg;
5108 mutex_unlock(&kvm->lock);
5109 break;
ffde22ac 5110 case KVM_XEN_HVM_CONFIG: {
51776043 5111 struct kvm_xen_hvm_config xhc;
ffde22ac 5112 r = -EFAULT;
51776043 5113 if (copy_from_user(&xhc, argp, sizeof(xhc)))
ffde22ac
ES
5114 goto out;
5115 r = -EINVAL;
51776043 5116 if (xhc.flags)
ffde22ac 5117 goto out;
51776043 5118 memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
ffde22ac
ES
5119 r = 0;
5120 break;
5121 }
afbcf7ab 5122 case KVM_SET_CLOCK: {
afbcf7ab
GC
5123 struct kvm_clock_data user_ns;
5124 u64 now_ns;
afbcf7ab
GC
5125
5126 r = -EFAULT;
5127 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
5128 goto out;
5129
5130 r = -EINVAL;
5131 if (user_ns.flags)
5132 goto out;
5133
5134 r = 0;
0bc48bea
RK
5135 /*
5136 * TODO: userspace has to take care of races with VCPU_RUN, so
5137 * kvm_gen_update_masterclock() can be cut down to locked
5138 * pvclock_update_vm_gtod_copy().
5139 */
5140 kvm_gen_update_masterclock(kvm);
e891a32e 5141 now_ns = get_kvmclock_ns(kvm);
108b249c 5142 kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
0bc48bea 5143 kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
afbcf7ab
GC
5144 break;
5145 }
5146 case KVM_GET_CLOCK: {
afbcf7ab
GC
5147 struct kvm_clock_data user_ns;
5148 u64 now_ns;
5149
e891a32e 5150 now_ns = get_kvmclock_ns(kvm);
108b249c 5151 user_ns.clock = now_ns;
e3fd9a93 5152 user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
97e69aa6 5153 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
afbcf7ab
GC
5154
5155 r = -EFAULT;
5156 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
5157 goto out;
5158 r = 0;
5159 break;
5160 }
5acc5c06
BS
5161 case KVM_MEMORY_ENCRYPT_OP: {
5162 r = -ENOTTY;
5163 if (kvm_x86_ops->mem_enc_op)
5164 r = kvm_x86_ops->mem_enc_op(kvm, argp);
5165 break;
5166 }
69eaedee
BS
5167 case KVM_MEMORY_ENCRYPT_REG_REGION: {
5168 struct kvm_enc_region region;
5169
5170 r = -EFAULT;
5171 if (copy_from_user(&region, argp, sizeof(region)))
5172 goto out;
5173
5174 r = -ENOTTY;
5175 if (kvm_x86_ops->mem_enc_reg_region)
5176 r = kvm_x86_ops->mem_enc_reg_region(kvm, &region);
5177 break;
5178 }
5179 case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
5180 struct kvm_enc_region region;
5181
5182 r = -EFAULT;
5183 if (copy_from_user(&region, argp, sizeof(region)))
5184 goto out;
5185
5186 r = -ENOTTY;
5187 if (kvm_x86_ops->mem_enc_unreg_region)
5188 r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
5189 break;
5190 }
faeb7833
RK
5191 case KVM_HYPERV_EVENTFD: {
5192 struct kvm_hyperv_eventfd hvevfd;
5193
5194 r = -EFAULT;
5195 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
5196 goto out;
5197 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
5198 break;
5199 }
66bb8a06
EH
5200 case KVM_SET_PMU_EVENT_FILTER:
5201 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
5202 break;
1fe779f8 5203 default:
ad6260da 5204 r = -ENOTTY;
1fe779f8
CO
5205 }
5206out:
5207 return r;
5208}
5209
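/*
 * kvm_init_msr_list() below prunes the compile-time msrs_to_save_all[],
 * emulated_msrs_all[] and msr_based_features_all[] tables down to what
 * the host actually supports; the resulting arrays are what userspace
 * enumerates via KVM_GET_MSR_INDEX_LIST and
 * KVM_GET_MSR_FEATURE_INDEX_LIST.
 */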
static void kvm_init_msr_list(void)
{
	struct x86_pmu_capability x86_pmu;
	u32 dummy[2];
	unsigned i;

	BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
			 "Please update the fixed PMCs in msrs_to_save_all[]");

	perf_get_x86_pmu_capability(&x86_pmu);

	num_msrs_to_save = 0;
	num_emulated_msrs = 0;
	num_msr_based_features = 0;

	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
		if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
			continue;

		/*
		 * Even MSRs that are valid in the host may not be exposed
		 * to the guests in some cases.
		 */
		switch (msrs_to_save_all[i]) {
		case MSR_IA32_BNDCFGS:
			if (!kvm_mpx_supported())
				continue;
			break;
		case MSR_TSC_AUX:
			if (!kvm_x86_ops->rdtscp_supported())
				continue;
			break;
		case MSR_IA32_RTIT_CTL:
		case MSR_IA32_RTIT_STATUS:
			if (!kvm_x86_ops->pt_supported())
				continue;
			break;
		case MSR_IA32_RTIT_CR3_MATCH:
			if (!kvm_x86_ops->pt_supported() ||
			    !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
				continue;
			break;
		case MSR_IA32_RTIT_OUTPUT_BASE:
		case MSR_IA32_RTIT_OUTPUT_MASK:
			if (!kvm_x86_ops->pt_supported() ||
			    (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
			     !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
				continue;
			break;
		case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: {
			if (!kvm_x86_ops->pt_supported() ||
			    msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
			    intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
				continue;
			break;
		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
				continue;
			break;
		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
				continue;
		}
		default:
			break;
		}

		msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
	}

	for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs_all[i]))
			continue;

		emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
	}

	for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
		struct kvm_msr_entry msr;

		msr.index = msr_based_features_all[i];
		if (kvm_get_msr_feature(&msr))
			continue;

		msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
	}
}

static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
			   const void *v)
{
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
		if (!(lapic_in_kernel(vcpu) &&
		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
			break;
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);

	return handled;
}

static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
		if (!(lapic_in_kernel(vcpu) &&
		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
					 addr, n, v))
		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
			break;
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);

	return handled;
}

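/*
 * The two helpers above split an MMIO access into at most 8-byte chunks
 * and try, per chunk, the in-kernel local APIC first and then the
 * KVM_MMIO_BUS devices; the number of bytes handled in the kernel is
 * returned so the caller can punt the remainder to userspace.
 */
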
static void kvm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception)
{
	gpa_t t_gpa;

	BUG_ON(!mmu_is_nested(vcpu));

	/* NPT walks are always user-walks */
	access |= PFERR_USER_MASK;
	t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);

	return t_gpa;
}

gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}

gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_FETCH_MASK;
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}

gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_WRITE_MASK;
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}

/* uses this to access any guest's mapped memory without checking CPL */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
{
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
}

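/*
 * The gva_to_gpa helpers above differ only in the PFERR_* access bits
 * they feed to the MMU walker: a read adds nothing, a fetch adds
 * PFERR_FETCH_MASK, a write adds PFERR_WRITE_MASK, and all of them add
 * PFERR_USER_MASK when the vCPU is at CPL 3.
 */
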
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 access,
				      struct x86_exception *exception)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
							    exception);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
					       offset, toread);
		if (ret < 0) {
			r = X86EMUL_IO_NEEDED;
			goto out;
		}

		bytes -= toread;
		data += toread;
		addr += toread;
	}
out:
	return r;
}

/* used for instruction fetching */
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	unsigned offset;
	int ret;

	/* Inline kvm_read_guest_virt_helper for speed. */
	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
						    exception);
	if (unlikely(gpa == UNMAPPED_GVA))
		return X86EMUL_PROPAGATE_FAULT;

	offset = addr & (PAGE_SIZE-1);
	if (WARN_ON(offset + bytes > PAGE_SIZE))
		bytes = (unsigned)PAGE_SIZE - offset;
	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
				       offset, bytes);
	if (unlikely(ret < 0))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

	/*
	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
	 * is returned, but our callers are not ready for that and they blindly
	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
	 * uninitialized kernel stack memory into cr2 and error code.
	 */
	memset(exception, 0, sizeof(*exception));
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
					  exception);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);

static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
			     gva_t addr, void *val, unsigned int bytes,
			     struct x86_exception *exception, bool system)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u32 access = 0;

	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
}

static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
		unsigned long addr, void *val, unsigned int bytes)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);

	return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
}

static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				       struct kvm_vcpu *vcpu, u32 access,
				       struct x86_exception *exception)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
							    access,
							    exception);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
		if (ret < 0) {
			r = X86EMUL_IO_NEEDED;
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}

static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
			      unsigned int bytes, struct x86_exception *exception,
			      bool system)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u32 access = PFERR_WRITE_MASK;

	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
					   access, exception);
}

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
				unsigned int bytes, struct x86_exception *exception)
{
	/* kvm_write_guest_virt_system can pull in tons of pages. */
	vcpu->arch.l1tf_flush_l1d = true;

	/*
	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
	 * is returned, but our callers are not ready for that and they blindly
	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
	 * uninitialized kernel stack memory into cr2 and error code.
	 */
	memset(exception, 0, sizeof(*exception));
	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
					   PFERR_WRITE_MASK, exception);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);

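/*
 * handle_ud() below implements the "forced emulation" debug hook: when
 * the force_emulation_prefix module parameter is set, a #UD whose
 * faulting instruction starts with the magic prefix (ud2 followed by
 * the bytes "kvm") has the prefix skipped and the remainder emulated,
 * instead of the #UD being reflected back into the guest.
 */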
int handle_ud(struct kvm_vcpu *vcpu)
{
	static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
	int emul_type = EMULTYPE_TRAP_UD;
	char sig[5]; /* ud2; .ascii "kvm" */
	struct x86_exception e;

	if (force_emulation_prefix &&
	    kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
				sig, sizeof(sig), &e) == 0 &&
	    memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
		kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
		emul_type = EMULTYPE_TRAP_UD_FORCED;
	}

	return kvm_emulate_instruction(vcpu, emul_type);
}
EXPORT_SYMBOL_GPL(handle_ud);

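/*
 * The helpers below decide whether a guest access must be treated as
 * MMIO.  The APIC-access page is always MMIO, and a hit in the per-vCPU
 * mmio_gva/mmio_gfn cache lets us skip a full page-table walk for
 * accesses that recently faulted as MMIO.
 */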
static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			    gpa_t gpa, bool write)
{
	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		return 1;

	if (vcpu_match_mmio_gpa(vcpu, gpa)) {
		trace_vcpu_match_mmio(gva, gpa, write, true);
		return 1;
	}

	return 0;
}

static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
				gpa_t *gpa, struct x86_exception *exception,
				bool write)
{
	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
		| (write ? PFERR_WRITE_MASK : 0);

	/*
	 * Currently, PKRU is only applied to EPT-enabled guests, so
	 * there is no pkey in the EPT page table for an L1 guest or in
	 * the shadow EPT page table for an L2 guest.
	 */
	if (vcpu_match_mmio_gva(vcpu, gva)
	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
				 vcpu->arch.mmio_access, 0, access)) {
		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
			(gva & (PAGE_SIZE - 1));
		trace_vcpu_match_mmio(gva, *gpa, write, false);
		return 1;
	}

	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);

	if (*gpa == UNMAPPED_GVA)
		return -1;

	return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_page_track_write(vcpu, gpa, val, bytes);
	return 1;
}

struct read_write_emulator_ops {
	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
				  int bytes);
	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
				  void *val, int bytes);
	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
			       int bytes, void *val);
	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
				    void *val, int bytes);
	bool write;
};

static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
	if (vcpu->mmio_read_completed) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
			       vcpu->mmio_fragments[0].gpa, val);
		vcpu->mmio_read_completed = 0;
		return 1;
	}

	return 0;
}

static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			void *val, int bytes)
{
	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
}

static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			 void *val, int bytes)
{
	return emulator_write_phys(vcpu, gpa, val, bytes);
}

static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
	return vcpu_mmio_write(vcpu, gpa, bytes, val);
}

static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			  void *val, int bytes)
{
	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
	return X86EMUL_IO_NEEDED;
}

static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			   void *val, int bytes)
{
	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];

	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
	return X86EMUL_CONTINUE;
}

static const struct read_write_emulator_ops read_emultor = {
	.read_write_prepare = read_prepare,
	.read_write_emulate = read_emulate,
	.read_write_mmio = vcpu_mmio_read,
	.read_write_exit_mmio = read_exit_mmio,
};

static const struct read_write_emulator_ops write_emultor = {
	.read_write_emulate = write_emulate,
	.read_write_mmio = write_mmio,
	.read_write_exit_mmio = write_exit_mmio,
	.write = true,
};

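/*
 * read_emultor/write_emultor above parameterize the common
 * emulator_read_write() path: the same page-crossing and MMIO-fragment
 * logic serves both directions, with only the per-direction callbacks
 * differing.  Reads additionally need read_write_prepare() to pick up
 * data completed by a previous KVM_EXIT_MMIO round trip.
 */
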
static int emulator_read_write_onepage(unsigned long addr, void *val,
				       unsigned int bytes,
				       struct x86_exception *exception,
				       struct kvm_vcpu *vcpu,
				       const struct read_write_emulator_ops *ops)
{
	gpa_t gpa;
	int handled, ret;
	bool write = ops->write;
	struct kvm_mmio_fragment *frag;
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;

	/*
	 * If the exit was due to a NPF we may already have a GPA.
	 * If the GPA is present, use it to avoid the GVA to GPA table walk.
	 * Note, this cannot be used on string operations since a string
	 * operation using rep will only have the initial GPA from the NPF
	 * that occurred.
	 */
	if (vcpu->arch.gpa_available &&
	    emulator_can_use_gpa(ctxt) &&
	    (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
		gpa = vcpu->arch.gpa_val;
		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
	} else {
		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
		if (ret < 0)
			return X86EMUL_PROPAGATE_FAULT;
	}

	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
	if (handled == bytes)
		return X86EMUL_CONTINUE;

	gpa += handled;
	bytes -= handled;
	val += handled;

	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
	frag->gpa = gpa;
	frag->data = val;
	frag->len = bytes;
	return X86EMUL_CONTINUE;
}

static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
			       unsigned long addr,
			       void *val, unsigned int bytes,
			       struct x86_exception *exception,
			       const struct read_write_emulator_ops *ops)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	gpa_t gpa;
	int rc;

	if (ops->read_write_prepare &&
	    ops->read_write_prepare(vcpu, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_nr_fragments = 0;

	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_read_write_onepage(addr, val, now, exception,
						 vcpu, ops);

		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			addr = (u32)addr;
		val += now;
		bytes -= now;
	}

	rc = emulator_read_write_onepage(addr, val, bytes, exception,
					 vcpu, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (!vcpu->mmio_nr_fragments)
		return rc;

	gpa = vcpu->mmio_fragments[0].gpa;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;
	vcpu->run->mmio.phys_addr = gpa;

	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
}

static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
				  unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, val, bytes,
				   exception, &read_emultor);
}

static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
				   unsigned long addr,
				   const void *val,
				   unsigned int bytes,
				   struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, (void *)val, bytes,
				   exception, &write_emultor);
}

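/*
 * Emulated cmpxchg must be atomic with respect to the guest's other
 * vCPUs.  The implementation below maps the guest page and performs a
 * real cmpxchg on the host mapping; anything that cannot be done
 * atomically (unaligned, page-crossing, MMIO) falls back to a plain
 * emulated write, with a one-time warning.
 */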
#define CMPXCHG_TYPE(t, ptr, old, new) \
	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))

#ifdef CONFIG_X86_64
# define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
#else
# define CMPXCHG64(ptr, old, new) \
	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
#endif

static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
				     unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct x86_exception *exception)
{
	struct kvm_host_map map;
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	gpa_t gpa;
	char *kaddr;
	bool exchanged;

	/* A guest's cmpxchg8b has to be emulated atomically */
	if (bytes > 8 || (bytes & (bytes - 1)))
		goto emul_write;

	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);

	if (gpa == UNMAPPED_GVA ||
	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto emul_write;

	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
		goto emul_write;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
		goto emul_write;

	kaddr = map.hva + offset_in_page(gpa);

	switch (bytes) {
	case 1:
		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
		break;
	case 2:
		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
		break;
	case 4:
		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
		break;
	case 8:
		exchanged = CMPXCHG64(kaddr, old, new);
		break;
	default:
		BUG();
	}

	kvm_vcpu_unmap(vcpu, &map, true);

	if (!exchanged)
		return X86EMUL_CMPXCHG_FAILED;

	kvm_page_track_write(vcpu, gpa, new, bytes);

	return X86EMUL_CONTINUE;

emul_write:
	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");

	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
}

static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
	int r = 0, i;

	for (i = 0; i < vcpu->arch.pio.count; i++) {
		if (vcpu->arch.pio.in)
			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
					    vcpu->arch.pio.size, pd);
		else
			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
					     vcpu->arch.pio.port, vcpu->arch.pio.size,
					     pd);
		if (r)
			break;
		pd += vcpu->arch.pio.size;
	}
	return r;
}

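/*
 * PIO emulation: kernel_pio() above tries to satisfy the access against
 * in-kernel KVM_PIO_BUS devices.  If that fails, emulator_pio_in_out()
 * below fills in vcpu->run and exits to userspace with KVM_EXIT_IO; the
 * data travels through the shared pio_data page at KVM_PIO_PAGE_OFFSET.
 */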
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
			       unsigned short port, void *val,
			       unsigned int count, bool in)
{
	vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.count = count;
	vcpu->arch.pio.size = size;

	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
		vcpu->arch.pio.count = 0;
		return 1;
	}

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;

	return 0;
}

static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
				    int size, unsigned short port, void *val,
				    unsigned int count)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int ret;

	if (vcpu->arch.pio.count)
		goto data_avail;

	memset(vcpu->arch.pio_data, 0, size * count);

	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
	if (ret) {
data_avail:
		memcpy(val, vcpu->arch.pio_data, size * count);
		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
		vcpu->arch.pio.count = 0;
		return 1;
	}

	return 0;
}

static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
				     int size, unsigned short port,
				     const void *val, unsigned int count)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	memcpy(vcpu->arch.pio_data, val, size * count);
	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
{
	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}

static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{
	if (!need_emulate_wbinvd(vcpu))
		return X86EMUL_CONTINUE;

	if (kvm_x86_ops->has_wbinvd_exit()) {
		int cpu = get_cpu();

		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
				       wbinvd_ipi, NULL, 1);
		put_cpu();
		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
	} else
		wbinvd();
	return X86EMUL_CONTINUE;
}

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
	kvm_emulate_wbinvd_noskip(vcpu);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);

static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
}

static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long *dest)
{
	return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}

static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long value)
{
	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long value;

	switch (cr) {
	case 0:
		value = kvm_read_cr0(vcpu);
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
		value = kvm_read_cr3(vcpu);
		break;
	case 4:
		value = kvm_read_cr4(vcpu);
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
		return 0;
	}

	return value;
}

static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int res = 0;

	switch (cr) {
	case 0:
		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		res = kvm_set_cr3(vcpu, val);
		break;
	case 4:
		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		break;
	case 8:
		res = kvm_set_cr8(vcpu, val);
		break;
	default:
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
		res = -1;
	}

	return res;
}

static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
}

static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
}

static unsigned long emulator_get_cached_segment_base(
	struct x86_emulate_ctxt *ctxt, int seg)
{
	return get_segment_base(emul_to_vcpu(ctxt), seg);
}

static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
				 struct desc_struct *desc, u32 *base3,
				 int seg)
{
	struct kvm_segment var;

	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
	*selector = var.selector;

	if (var.unusable) {
		memset(desc, 0, sizeof(*desc));
		if (base3)
			*base3 = 0;
		return false;
	}

	if (var.g)
		var.limit >>= 12;
	set_desc_limit(desc, var.limit);
	set_desc_base(desc, (unsigned long)var.base);
#ifdef CONFIG_X86_64
	if (base3)
		*base3 = var.base >> 32;
#endif
	desc->type = var.type;
	desc->s = var.s;
	desc->dpl = var.dpl;
	desc->p = var.present;
	desc->avl = var.avl;
	desc->l = var.l;
	desc->d = var.db;
	desc->g = var.g;

	return true;
}

static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
				 struct desc_struct *desc, u32 base3,
				 int seg)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	struct kvm_segment var;

	var.selector = selector;
	var.base = get_desc_base(desc);
#ifdef CONFIG_X86_64
	var.base |= ((u64)base3) << 32;
#endif
	var.limit = get_desc_limit(desc);
	if (desc->g)
		var.limit = (var.limit << 12) | 0xfff;
	var.type = desc->type;
	var.dpl = desc->dpl;
	var.db = desc->d;
	var.s = desc->s;
	var.l = desc->l;
	var.g = desc->g;
	var.avl = desc->avl;
	var.present = desc->p;
	var.unusable = !var.present;
	var.padding = 0;

	kvm_set_segment(vcpu, &var, seg);
	return;
}

static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 *pdata)
{
	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
}

static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 data)
{
	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
}

static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	return vcpu->arch.smbase;
}

static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	vcpu->arch.smbase = smbase;
}

static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
			      u32 pmc)
{
	return kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc);
}

static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
			     u32 pmc, u64 *pdata)
{
	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
}

static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{
	emul_to_vcpu(ctxt)->arch.halt_request = 1;
}

static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
			      struct x86_instruction_info *info,
			      enum x86_intercept_stage stage)
{
	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}

static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool check_limit)
{
	return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, check_limit);
}

static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt)
{
	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM);
}

static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
{
	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
}

static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
{
	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
}

static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
	return kvm_register_read(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}

static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{
	kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
}

static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
{
	return emul_to_vcpu(ctxt)->arch.hflags;
}

static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
{
	emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
}

static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
				  const char *smstate)
{
	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
}

static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
{
	kvm_smm_changed(emul_to_vcpu(ctxt));
}

static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
{
	return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
}

static const struct x86_emulate_ops emulate_ops = {
	.read_gpr = emulator_read_gpr,
	.write_gpr = emulator_write_gpr,
	.read_std = emulator_read_std,
	.write_std = emulator_write_std,
	.read_phys = kvm_read_guest_phys_system,
	.fetch = kvm_fetch_guest_virt,
	.read_emulated = emulator_read_emulated,
	.write_emulated = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
	.invlpg = emulator_invlpg,
	.pio_in_emulated = emulator_pio_in_emulated,
	.pio_out_emulated = emulator_pio_out_emulated,
	.get_segment = emulator_get_segment,
	.set_segment = emulator_set_segment,
	.get_cached_segment_base = emulator_get_cached_segment_base,
	.get_gdt = emulator_get_gdt,
	.get_idt = emulator_get_idt,
	.set_gdt = emulator_set_gdt,
	.set_idt = emulator_set_idt,
	.get_cr = emulator_get_cr,
	.set_cr = emulator_set_cr,
	.cpl = emulator_get_cpl,
	.get_dr = emulator_get_dr,
	.set_dr = emulator_set_dr,
	.get_smbase = emulator_get_smbase,
	.set_smbase = emulator_set_smbase,
	.set_msr = emulator_set_msr,
	.get_msr = emulator_get_msr,
	.check_pmc = emulator_check_pmc,
	.read_pmc = emulator_read_pmc,
	.halt = emulator_halt,
	.wbinvd = emulator_wbinvd,
	.fix_hypercall = emulator_fix_hypercall,
	.intercept = emulator_intercept,
	.get_cpuid = emulator_get_cpuid,
	.guest_has_long_mode = emulator_guest_has_long_mode,
	.guest_has_movbe = emulator_guest_has_movbe,
	.guest_has_fxsr = emulator_guest_has_fxsr,
	.set_nmi_mask = emulator_set_nmi_mask,
	.get_hflags = emulator_get_hflags,
	.set_hflags = emulator_set_hflags,
	.pre_leave_smm = emulator_pre_leave_smm,
	.post_leave_smm = emulator_post_leave_smm,
	.set_xcr = emulator_set_xcr,
};

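/*
 * emulate_ops above is the complete callback surface the instruction
 * emulator is allowed to use; everything it learns about the vCPU
 * (registers, segments, memory, MSRs, port I/O) goes through this
 * table, which keeps the emulator itself free of direct KVM
 * dependencies.
 */
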
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
	/*
	 * An "sti; sti" sequence only disables interrupts for the first
	 * instruction.  So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti.  We should not
	 * leave the flag on in this case.  The same goes for mov ss.
	 */
	if (int_shadow & mask)
		mask = 0;
	if (unlikely(int_shadow || mask)) {
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
		if (!mask)
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
}

static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	if (ctxt->exception.vector == PF_VECTOR)
		return kvm_propagate_fault(vcpu, &ctxt->exception);

	if (ctxt->exception.error_code_valid)
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
	else
		kvm_queue_exception(vcpu, ctxt->exception.vector);
	return false;
}

static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int cs_db, cs_l;

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;

	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
		     cs_db				? X86EMUL_MODE_PROT32 :
							  X86EMUL_MODE_PROT16;
	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);

	init_decode_cache(ctxt);
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ctxt->op_bytes = 2;
	ctxt->ad_bytes = 2;
	ctxt->_eip = ctxt->eip + inc_eip;
	ret = emulate_int_real(ctxt, irq);

	if (ret != X86EMUL_CONTINUE) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	} else {
		ctxt->eip = ctxt->_eip;
		kvm_rip_write(vcpu, ctxt->eip);
		kvm_set_rflags(vcpu, ctxt->eflags);
	}
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);

static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);

	if (emulation_type & EMULTYPE_VMWARE_GP) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
		return 1;
	}

	if (emulation_type & EMULTYPE_SKIP) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return 0;
	}

	kvm_queue_exception(vcpu, UD_VECTOR);

	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return 0;
	}

	return 1;
}

static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				  bool write_fault_to_shadow_pgtable,
				  int emulation_type)
{
	gpa_t gpa = cr2_or_gpa;
	kvm_pfn_t pfn;

	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
		return false;

	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
		return false;

	if (!vcpu->arch.mmu->direct_map) {
		/*
		 * Write permission should be allowed since only
		 * write access needs to be emulated.
		 */
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);

		/*
		 * If the mapping is invalid in the guest, let the CPU retry
		 * it to generate the fault.
		 */
		if (gpa == UNMAPPED_GVA)
			return true;
	}

	/*
	 * Do not retry the unhandleable instruction if it faults on
	 * read-only host memory, otherwise it will go into an infinite loop:
	 * retry instruction -> write #PF -> emulation fail -> retry
	 * instruction -> ...
	 */
	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the instruction failed on the error pfn, it cannot be fixed;
	 * report the error to userspace.
	 */
	if (is_error_noslot_pfn(pfn))
		return false;

	kvm_release_pfn_clean(pfn);

	/* The instructions are well-emulated on direct mmu. */
	if (vcpu->arch.mmu->direct_map) {
		unsigned int indirect_shadow_pages;

		spin_lock(&vcpu->kvm->mmu_lock);
		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
		spin_unlock(&vcpu->kvm->mmu_lock);

		if (indirect_shadow_pages)
			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

		return true;
	}

	/*
	 * If emulation was due to access to a shadowed page table
	 * and it failed, try to unshadow the page and re-enter the
	 * guest to let the CPU execute the instruction.
	 */
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the access faults on its page table, it cannot
	 * be fixed by unprotecting the shadow page, and it should
	 * be reported to userspace.
	 */
	return !write_fault_to_shadow_pgtable;
}

static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      gpa_t cr2_or_gpa, int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;

	/*
	 * If the emulation was caused by a #PF and the faulting instruction
	 * is not a page-table-writing one, the VM-EXIT was caused by shadow
	 * page protection; we can zap the shadow page and retry the
	 * instruction directly.
	 *
	 * Note: if the guest uses a non-page-table modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go to an infinite loop.  So, we cache the
	 * last retried eip and the last fault address; if we meet the eip
	 * and the address again, we can break out of the potential infinite
	 * loop.
	 */
	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;

	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
		return false;

	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
		return false;

	if (x86_page_table_writing_insn(ctxt))
		return false;

	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
		return false;

	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2_or_gpa;

	if (!vcpu->arch.mmu->direct_map)
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);

	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	return true;
}

static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);

static void kvm_smm_changed(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
		/* This is a good place to trace that we are exiting SMM. */
		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);

		/* Process a latched INIT or SMI, if any. */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	kvm_mmu_reset_context(vcpu);
}

static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
				unsigned long *db)
{
	u32 dr6 = 0;
	int i;
	u32 enable, rwlen;

	enable = dr7;
	rwlen = dr7 >> 16;
	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
			dr6 |= (1 << i);
	return dr6;
}

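/*
 * kvm_vcpu_check_hw_bp() above decodes DR7 directly: bits 0-7 hold the
 * L/G enable pairs for the four debug registers, and bits 16-31 hold
 * the R/W type and LEN fields in 4-bit groups, hence the
 * "enable >>= 2, rwlen >>= 4" walk.  Matching breakpoints are
 * accumulated as DR6 trap bits.
 */
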
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		return 0;
	}
	kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
	return 1;
}

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
	int r;

	r = kvm_x86_ops->skip_emulated_instruction(vcpu);
	if (unlikely(!r))
		return 0;

	/*
	 * rflags is the old, "raw" value of the flags.  The new value has
	 * not been saved yet.
	 *
	 * This is correct even for TF set by the guest, because "the
	 * processor will not generate this exception after the instruction
	 * that sets the TF flag".
	 */
	if (unlikely(rflags & X86_EFLAGS_TF))
		r = kvm_vcpu_do_singlestep(vcpu);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);

static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
		struct kvm_run *kvm_run = vcpu->run;
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					       vcpu->arch.guest_debug_dr7,
					       vcpu->arch.eff_db);

		if (dr6 != 0) {
			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
			kvm_run->debug.arch.pc = eip;
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = 0;
			return true;
		}
	}

	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					       vcpu->arch.dr7,
					       vcpu->arch.db);

		if (dr6 != 0) {
			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
			vcpu->arch.dr6 |= dr6 | DR6_RTM;
			kvm_queue_exception(vcpu, DB_VECTOR);
			*r = 1;
			return true;
		}
	}

	return false;
}

04789b66
LA
6695static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
6696{
2d7921c4
AM
6697 switch (ctxt->opcode_len) {
6698 case 1:
6699 switch (ctxt->b) {
6700 case 0xe4: /* IN */
6701 case 0xe5:
6702 case 0xec:
6703 case 0xed:
6704 case 0xe6: /* OUT */
6705 case 0xe7:
6706 case 0xee:
6707 case 0xef:
6708 case 0x6c: /* INS */
6709 case 0x6d:
6710 case 0x6e: /* OUTS */
6711 case 0x6f:
6712 return true;
6713 }
6714 break;
6715 case 2:
6716 switch (ctxt->b) {
6717 case 0x33: /* RDPMC */
6718 return true;
6719 }
6720 break;
04789b66
LA
6721 }
6722
6723 return false;
6724}
6725
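/*
 * Standalone restatement of the opcode filter above (hypothetical
 * helper name, not built here): the VMware backdoor is only reachable
 * through the one-byte IN/OUT/INS/OUTS opcodes or the two-byte 0F 33
 * (RDPMC) encoding.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool demo_is_vmware_backdoor(int opcode_len, uint8_t b)
{
	if (opcode_len == 1) {
		switch (b) {
		case 0xe4: case 0xe5: case 0xec: case 0xed:	/* IN   */
		case 0xe6: case 0xe7: case 0xee: case 0xef:	/* OUT  */
		case 0x6c: case 0x6d:				/* INS  */
		case 0x6e: case 0x6f:				/* OUTS */
			return true;
		}
	} else if (opcode_len == 2 && b == 0x33) {		/* RDPMC */
		return true;
	}
	return false;
}
#endif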
736c291c
SC
6726int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
6727 int emulation_type, void *insn, int insn_len)
bbd9b64e 6728{
95cb2295 6729 int r;
9d74191a 6730 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
7ae441ea 6731 bool writeback = true;
93c05d3e 6732 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
bbd9b64e 6733
c595ceee
PB
6734 vcpu->arch.l1tf_flush_l1d = true;
6735
93c05d3e
XG
6736 /*
6737 * Clear write_fault_to_shadow_pgtable here to ensure it is
6738 * never reused.
6739 */
6740 vcpu->arch.write_fault_to_shadow_pgtable = false;
26eef70c 6741 kvm_clear_exception_queue(vcpu);
8d7d8102 6742
571008da 6743 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
8ec4722d 6744 init_emulate_ctxt(vcpu);
4a1e10d5
PB
6745
6746 /*
6747 * We will reenter on the same instruction since
6748 * we do not set complete_userspace_io. This does not
6748 * handle watchpoints yet; those would be handled in
6750 * the emulate_ops.
6751 */
d391f120
VK
6752 if (!(emulation_type & EMULTYPE_SKIP) &&
6753 kvm_vcpu_check_breakpoint(vcpu, &r))
4a1e10d5
PB
6754 return r;
6755
9d74191a
TY
6756 ctxt->interruptibility = 0;
6757 ctxt->have_exception = false;
e0ad0b47 6758 ctxt->exception.vector = -1;
9d74191a 6759 ctxt->perm_ok = false;
bbd9b64e 6760
b51e974f 6761 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
4005996e 6762
9d74191a 6763 r = x86_decode_insn(ctxt, insn, insn_len);
bbd9b64e 6764
e46479f8 6765 trace_kvm_emulate_insn_start(vcpu);
f2b5756b 6766 ++vcpu->stat.insn_emulation;
1d2887e2 6767 if (r != EMULATION_OK) {
b4000606 6768 if ((emulation_type & EMULTYPE_TRAP_UD) ||
c83fad65
SC
6769 (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
6770 kvm_queue_exception(vcpu, UD_VECTOR);
60fc3d02 6771 return 1;
c83fad65 6772 }
736c291c
SC
6773 if (reexecute_instruction(vcpu, cr2_or_gpa,
6774 write_fault_to_spt,
6775 emulation_type))
60fc3d02 6776 return 1;
8530a79c 6777 if (ctxt->have_exception) {
c8848cee
JD
6778 /*
6779 * #UD should result in just EMULATION_FAILED, and trap-like
6780 * exception should not be encountered during decode.
6781 */
6782 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
6783 exception_type(ctxt->exception.vector) == EXCPT_TRAP);
8530a79c 6784 inject_emulated_exception(vcpu);
60fc3d02 6785 return 1;
8530a79c 6786 }
e2366171 6787 return handle_emulation_failure(vcpu, emulation_type);
bbd9b64e
CO
6788 }
6789 }
6790
42cbf068
SC
6791 if ((emulation_type & EMULTYPE_VMWARE_GP) &&
6792 !is_vmware_backdoor_opcode(ctxt)) {
6793 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
60fc3d02 6794 return 1;
42cbf068 6795 }
04789b66 6796
1957aa63
SC
6797 /*
6798 * Note, EMULTYPE_SKIP is intended for use *only* by vendor callbacks
6799 * for kvm_skip_emulated_instruction(). The caller is responsible for
6800 * updating interruptibility state and injecting single-step #DBs.
6801 */
ba8afb6b 6802 if (emulation_type & EMULTYPE_SKIP) {
9dac77fa 6803 kvm_rip_write(vcpu, ctxt->_eip);
bb663c7a
NA
6804 if (ctxt->eflags & X86_EFLAGS_RF)
6805 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
60fc3d02 6806 return 1;
ba8afb6b
GN
6807 }
6808
736c291c 6809 if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
60fc3d02 6810 return 1;
1cb3f3ae 6811
7ae441ea 6812 /* This is needed for the VMware backdoor interface to work, since
4d2179e1 6813 it changes register values during the IO operation. */
7ae441ea
GN
6814 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
6815 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
dd856efa 6816 emulator_invalidate_register_cache(ctxt);
7ae441ea 6817 }
4d2179e1 6818
5cd21917 6819restart:
0f89b207 6820 /* Save the faulting GPA (cr2) in the address field */
736c291c 6821 ctxt->exception.address = cr2_or_gpa;
0f89b207 6822
9d74191a 6823 r = x86_emulate_insn(ctxt);
bbd9b64e 6824
775fde86 6825 if (r == EMULATION_INTERCEPTED)
60fc3d02 6826 return 1;
775fde86 6827
d2ddd1c4 6828 if (r == EMULATION_FAILED) {
736c291c 6829 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
991eebf9 6830 emulation_type))
60fc3d02 6831 return 1;
c3cd7ffa 6832
e2366171 6833 return handle_emulation_failure(vcpu, emulation_type);
bbd9b64e
CO
6834 }
6835
9d74191a 6836 if (ctxt->have_exception) {
60fc3d02 6837 r = 1;
ef54bcfe
PB
6838 if (inject_emulated_exception(vcpu))
6839 return r;
d2ddd1c4 6840 } else if (vcpu->arch.pio.count) {
0912c977
PB
6841 if (!vcpu->arch.pio.in) {
6842 /* FIXME: return into emulator if single-stepping. */
3457e419 6843 vcpu->arch.pio.count = 0;
0912c977 6844 } else {
7ae441ea 6845 writeback = false;
716d51ab
GN
6846 vcpu->arch.complete_userspace_io = complete_emulated_pio;
6847 }
60fc3d02 6848 r = 0;
7ae441ea 6849 } else if (vcpu->mmio_needed) {
bc8a0aaf
SC
6850 ++vcpu->stat.mmio_exits;
6851
7ae441ea
GN
6852 if (!vcpu->mmio_is_write)
6853 writeback = false;
60fc3d02 6854 r = 0;
716d51ab 6855 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
7ae441ea 6856 } else if (r == EMULATION_RESTART)
5cd21917 6857 goto restart;
d2ddd1c4 6858 else
60fc3d02 6859 r = 1;
f850e2e6 6860
7ae441ea 6861 if (writeback) {
6addfc42 6862 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
9d74191a 6863 toggle_interruptibility(vcpu, ctxt->interruptibility);
7ae441ea 6864 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
38827dbd 6865 if (!ctxt->have_exception ||
75ee23b3
SC
6866 exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
6867 kvm_rip_write(vcpu, ctxt->eip);
60fc3d02 6868 if (r && ctxt->tf)
120c2c4f 6869 r = kvm_vcpu_do_singlestep(vcpu);
38827dbd 6870 __kvm_set_rflags(vcpu, ctxt->eflags);
75ee23b3 6871 }
6addfc42
PB
6872
6873 /*
6874 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
6875 * do nothing, and it will be requested again as soon as
6876 * the shadow expires. But we still need to check here,
6877 * because POPF has no interrupt shadow.
6878 */
6879 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
6880 kvm_make_request(KVM_REQ_EVENT, vcpu);
7ae441ea
GN
6881 } else
6882 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
e85d28f8
GN
6883
6884 return r;
de7d789a 6885}
c60658d1
SC
6886
6887int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
6888{
6889 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
6890}
6891EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
6892
6893int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
6894 void *insn, int insn_len)
6895{
6896 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
6897}
6898EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
de7d789a 6899
8764ed55
SC
6900static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
6901{
6902 vcpu->arch.pio.count = 0;
6903 return 1;
6904}
6905
45def77e
SC
6906static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
6907{
6908 vcpu->arch.pio.count = 0;
6909
6910 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
6911 return 1;
6912
6913 return kvm_skip_emulated_instruction(vcpu);
6914}
6915
dca7f128
SC
6916static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
6917 unsigned short port)
de7d789a 6918{
de3cd117 6919 unsigned long val = kvm_rax_read(vcpu);
ca1d4a9e
AK
6920 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
6921 size, port, &val, 1);
8764ed55
SC
6922 if (ret)
6923 return ret;
45def77e 6924
8764ed55
SC
6925 /*
6926 * Work around userspace that relies on the old KVM behavior of %rip being
6927 * incremented prior to exiting to userspace to handle "OUT 0x7e".
6928 */
6929 if (port == 0x7e &&
6930 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
6931 vcpu->arch.complete_userspace_io =
6932 complete_fast_pio_out_port_0x7e;
6933 kvm_skip_emulated_instruction(vcpu);
6934 } else {
45def77e
SC
6935 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
6936 vcpu->arch.complete_userspace_io = complete_fast_pio_out;
6937 }
8764ed55 6938 return 0;
de7d789a 6939}
de7d789a 6940
8370c3d0
TL
6941static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
6942{
6943 unsigned long val;
6944
6945 /* We should only ever be called with arch.pio.count equal to 1 */
6946 BUG_ON(vcpu->arch.pio.count != 1);
6947
45def77e
SC
6948 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
6949 vcpu->arch.pio.count = 0;
6950 return 1;
6951 }
6952
8370c3d0 6953 /* For a size below 4 bytes we merge into RAX, else we zero-extend */
de3cd117 6954 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
8370c3d0
TL
6955
6956 /*
6957 * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
6958 * the copy and tracing
6959 */
6960 emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
6961 vcpu->arch.pio.port, &val, 1);
de3cd117 6962 kvm_rax_write(vcpu, val);
8370c3d0 6963
45def77e 6964 return kvm_skip_emulated_instruction(vcpu);
8370c3d0
TL
6965}
6966
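/*
 * Worked example of the merge-vs-zero-extend rule used by the fast
 * PIO-in path above, as standalone userspace C. Like hardware, 8- and
 * 16-bit IN results only replace the low bytes of RAX, while 32-bit
 * results zero-extend. Helper name and sample values are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_merge_rax(uint64_t rax, uint64_t data, int size)
{
	uint64_t mask = (1ULL << (size * 8)) - 1;

	return (size < 4) ? ((rax & ~mask) | (data & mask))
			  : (uint32_t)data;
}

int main(void)
{
	/* inb: only AL changes -> 0x1122334455667799 */
	printf("%llx\n", (unsigned long long)
	       demo_merge_rax(0x1122334455667788ULL, 0x99, 1));
	/* inl: EAX zero-extends -> 0xaabbccdd */
	printf("%llx\n", (unsigned long long)
	       demo_merge_rax(0x1122334455667788ULL, 0xaabbccddULL, 4));
	return 0;
}
#endif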
dca7f128
SC
6967static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
6968 unsigned short port)
8370c3d0
TL
6969{
6970 unsigned long val;
6971 int ret;
6972
6973 /* For a size below 4 bytes we merge into RAX, else we zero-extend */
de3cd117 6974 val = (size < 4) ? kvm_rax_read(vcpu) : 0;
8370c3d0
TL
6975
6976 ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
6977 &val, 1);
6978 if (ret) {
de3cd117 6979 kvm_rax_write(vcpu, val);
8370c3d0
TL
6980 return ret;
6981 }
6982
45def77e 6983 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
8370c3d0
TL
6984 vcpu->arch.complete_userspace_io = complete_fast_pio_in;
6985
6986 return 0;
6987}
dca7f128
SC
6988
6989int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
6990{
45def77e 6991 int ret;
dca7f128 6992
dca7f128 6993 if (in)
45def77e 6994 ret = kvm_fast_pio_in(vcpu, size, port);
dca7f128 6995 else
45def77e
SC
6996 ret = kvm_fast_pio_out(vcpu, size, port);
6997 return ret && kvm_skip_emulated_instruction(vcpu);
dca7f128
SC
6998}
6999EXPORT_SYMBOL_GPL(kvm_fast_pio);
8370c3d0 7000
251a5fd6 7001static int kvmclock_cpu_down_prep(unsigned int cpu)
8cfdc000 7002{
0a3aee0d 7003 __this_cpu_write(cpu_tsc_khz, 0);
251a5fd6 7004 return 0;
8cfdc000
ZA
7005}
7006
7007static void tsc_khz_changed(void *data)
c8076604 7008{
8cfdc000
ZA
7009 struct cpufreq_freqs *freq = data;
7010 unsigned long khz = 0;
7011
7012 if (data)
7013 khz = freq->new;
7014 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
7015 khz = cpufreq_quick_get(raw_smp_processor_id());
7016 if (!khz)
7017 khz = tsc_khz;
0a3aee0d 7018 __this_cpu_write(cpu_tsc_khz, khz);
c8076604
GH
7019}
7020
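/*
 * The frequency lookup above, reduced to its fallback chain (sketch,
 * hypothetical name): prefer the notifier's new value, then the
 * cpufreq governor's current reading, then the boot-time calibration.
 * The middle step is skipped on constant-TSC CPUs in the real code.
 */
#if 0
static unsigned long demo_pick_khz(unsigned long freq_new,
				   unsigned long cpufreq_khz,
				   unsigned long boot_tsc_khz)
{
	if (freq_new)
		return freq_new;
	if (cpufreq_khz)
		return cpufreq_khz;
	return boot_tsc_khz;
}
#endif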
5fa4ec9c 7021#ifdef CONFIG_X86_64
0092e434
VK
7022static void kvm_hyperv_tsc_notifier(void)
7023{
0092e434
VK
7024 struct kvm *kvm;
7025 struct kvm_vcpu *vcpu;
7026 int cpu;
7027
0d9ce162 7028 mutex_lock(&kvm_lock);
0092e434
VK
7029 list_for_each_entry(kvm, &vm_list, vm_list)
7030 kvm_make_mclock_inprogress_request(kvm);
7031
7032 hyperv_stop_tsc_emulation();
7033
7034 /* TSC frequency always matches when on Hyper-V */
7035 for_each_present_cpu(cpu)
7036 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
7037 kvm_max_guest_tsc_khz = tsc_khz;
7038
7039 list_for_each_entry(kvm, &vm_list, vm_list) {
7040 struct kvm_arch *ka = &kvm->arch;
7041
7042 spin_lock(&ka->pvclock_gtod_sync_lock);
7043
7044 pvclock_update_vm_gtod_copy(kvm);
7045
7046 kvm_for_each_vcpu(cpu, vcpu, kvm)
7047 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
7048
7049 kvm_for_each_vcpu(cpu, vcpu, kvm)
7050 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
7051
7052 spin_unlock(&ka->pvclock_gtod_sync_lock);
7053 }
0d9ce162 7054 mutex_unlock(&kvm_lock);
0092e434 7055}
5fa4ec9c 7056#endif
0092e434 7057
df24014a 7058static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
c8076604 7059{
c8076604
GH
7060 struct kvm *kvm;
7061 struct kvm_vcpu *vcpu;
7062 int i, send_ipi = 0;
7063
8cfdc000
ZA
7064 /*
7065 * We allow guests to temporarily run on slowing clocks,
7066 * provided we notify them after, or to run on accelerating
7067 * clocks, provided we notify them before. Thus time never
7068 * goes backwards.
7069 *
7070 * However, we have a problem. We can't atomically update
7071 * the frequency of a given CPU from this function; it is
7072 * merely a notifier, which can be called from any CPU.
7073 * Changing the TSC frequency at arbitrary points in time
7074 * requires a recomputation of local variables related to
7075 * the TSC for each VCPU. We must flag these local variables
7076 * to be updated and be sure the update takes place with the
7077 * new frequency before any guests proceed.
7078 *
7079 * Unfortunately, the combination of hotplug CPU and frequency
7080 * change creates an intractable locking scenario; the order
7081 * of when these callouts happen is undefined with respect to
7082 * CPU hotplug, and they can race with each other. As such,
7083 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
7084 * undefined; you can actually have a CPU frequency change take
7085 * place in between the computation of X and the setting of the
7086 * variable. To protect against this problem, all updates of
7087 * the per_cpu tsc_khz variable are done in an interrupt
7088 * protected IPI, and all callers wishing to update the value
7089 * must wait for a synchronous IPI to complete (which is trivial
7090 * if the caller is on the CPU already). This establishes the
7091 * necessary total order on variable updates.
7092 *
7093 * Note that because a guest time update may take place
7094 * anytime after the setting of the VCPU's request bit, the
7095 * correct TSC value must be set before the request. However,
7096 * to ensure the update actually makes it to any guest which
7097 * starts running in hardware virtualization between the set
7098 * and the acquisition of the spinlock, we must also ping the
7099 * CPU after setting the request bit.
7100 *
7101 */
7102
df24014a 7103 smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
c8076604 7104
0d9ce162 7105 mutex_lock(&kvm_lock);
c8076604 7106 list_for_each_entry(kvm, &vm_list, vm_list) {
988a2cae 7107 kvm_for_each_vcpu(i, vcpu, kvm) {
df24014a 7108 if (vcpu->cpu != cpu)
c8076604 7109 continue;
c285545f 7110 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
0d9ce162 7111 if (vcpu->cpu != raw_smp_processor_id())
8cfdc000 7112 send_ipi = 1;
c8076604
GH
7113 }
7114 }
0d9ce162 7115 mutex_unlock(&kvm_lock);
c8076604
GH
7116
7117 if (freq->old < freq->new && send_ipi) {
7118 /*
7119 * We are scaling the frequency up. We must make sure the
7120 * guest doesn't see old kvmclock values while running at
7121 * the new frequency; otherwise we risk the guest seeing
7122 * time go backwards.
7123 *
7124 * In case we update the frequency for another cpu
7125 * (which might be in guest context) send an interrupt
7126 * to kick the cpu out of guest context. Next time
7127 * guest context is entered kvmclock will be updated,
7128 * so the guest will not see stale values.
7129 */
df24014a 7130 smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
c8076604 7131 }
df24014a
VK
7132}
7133
7134static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
7135 void *data)
7136{
7137 struct cpufreq_freqs *freq = data;
7138 int cpu;
7139
7140 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
7141 return 0;
7142 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
7143 return 0;
7144
7145 for_each_cpu(cpu, freq->policy->cpus)
7146 __kvmclock_cpufreq_notifier(freq, cpu);
7147
c8076604
GH
7148 return 0;
7149}
7150
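/*
 * The phase filter above as a pure function (sketch; names are
 * stand-ins for the CPUFREQ_* constants): act before the clock
 * accelerates and after it slows, so kvmclock never advertises a
 * higher rate than the TSC is actually running at, and guest time
 * never goes backwards.
 */
#if 0
#include <stdbool.h>

enum demo_phase { DEMO_PRECHANGE, DEMO_POSTCHANGE };

static bool demo_should_update(enum demo_phase phase,
			       unsigned int old_khz, unsigned int new_khz)
{
	if (phase == DEMO_PRECHANGE && old_khz > new_khz)
		return false;	/* slowing down: wait for POSTCHANGE */
	if (phase == DEMO_POSTCHANGE && old_khz < new_khz)
		return false;	/* speeding up: handled at PRECHANGE */
	return true;
}
#endif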
7151static struct notifier_block kvmclock_cpufreq_notifier_block = {
8cfdc000
ZA
7152 .notifier_call = kvmclock_cpufreq_notifier
7153};
7154
251a5fd6 7155static int kvmclock_cpu_online(unsigned int cpu)
8cfdc000 7156{
251a5fd6
SAS
7157 tsc_khz_changed(NULL);
7158 return 0;
8cfdc000
ZA
7159}
7160
b820cc0c
ZA
7161static void kvm_timer_init(void)
7162{
c285545f 7163 max_tsc_khz = tsc_khz;
460dd42e 7164
b820cc0c 7165 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
c285545f
ZA
7166#ifdef CONFIG_CPU_FREQ
7167 struct cpufreq_policy policy;
758f588d
BP
7168 int cpu;
7169
c285545f 7170 memset(&policy, 0, sizeof(policy));
3e26f230
AK
7171 cpu = get_cpu();
7172 cpufreq_get_policy(&policy, cpu);
c285545f
ZA
7173 if (policy.cpuinfo.max_freq)
7174 max_tsc_khz = policy.cpuinfo.max_freq;
3e26f230 7175 put_cpu();
c285545f 7176#endif
b820cc0c
ZA
7177 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
7178 CPUFREQ_TRANSITION_NOTIFIER);
7179 }
460dd42e 7180
73c1b41e 7181 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
251a5fd6 7182 kvmclock_cpu_online, kvmclock_cpu_down_prep);
b820cc0c
ZA
7183}
7184
dd60d217
AK
7185DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
7186EXPORT_PER_CPU_SYMBOL_GPL(current_vcpu);
ff9d07a0 7187
f5132b01 7188int kvm_is_in_guest(void)
ff9d07a0 7189{
086c9855 7190 return __this_cpu_read(current_vcpu) != NULL;
ff9d07a0
ZY
7191}
7192
7193static int kvm_is_user_mode(void)
7194{
7195 int user_mode = 3;
dcf46b94 7196
086c9855
AS
7197 if (__this_cpu_read(current_vcpu))
7198 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
dcf46b94 7199
ff9d07a0
ZY
7200 return user_mode != 0;
7201}
7202
7203static unsigned long kvm_get_guest_ip(void)
7204{
7205 unsigned long ip = 0;
dcf46b94 7206
086c9855
AS
7207 if (__this_cpu_read(current_vcpu))
7208 ip = kvm_rip_read(__this_cpu_read(current_vcpu));
dcf46b94 7209
ff9d07a0
ZY
7210 return ip;
7211}
7212
8479e04e
LK
7213static void kvm_handle_intel_pt_intr(void)
7214{
7215 struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
7216
7217 kvm_make_request(KVM_REQ_PMI, vcpu);
7218 __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
7219 (unsigned long *)&vcpu->arch.pmu.global_status);
7220}
7221
ff9d07a0
ZY
7222static struct perf_guest_info_callbacks kvm_guest_cbs = {
7223 .is_in_guest = kvm_is_in_guest,
7224 .is_user_mode = kvm_is_user_mode,
7225 .get_guest_ip = kvm_get_guest_ip,
8479e04e 7226 .handle_intel_pt_intr = kvm_handle_intel_pt_intr,
ff9d07a0
ZY
7227};
7228
16e8d74d
MT
7229#ifdef CONFIG_X86_64
7230static void pvclock_gtod_update_fn(struct work_struct *work)
7231{
d828199e
MT
7232 struct kvm *kvm;
7233
7234 struct kvm_vcpu *vcpu;
7235 int i;
7236
0d9ce162 7237 mutex_lock(&kvm_lock);
d828199e
MT
7238 list_for_each_entry(kvm, &vm_list, vm_list)
7239 kvm_for_each_vcpu(i, vcpu, kvm)
105b21bb 7240 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
d828199e 7241 atomic_set(&kvm_guest_has_master_clock, 0);
0d9ce162 7242 mutex_unlock(&kvm_lock);
16e8d74d
MT
7243}
7244
7245static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
7246
7247/*
7248 * Notification about pvclock gtod data update.
7249 */
7250static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
7251 void *priv)
7252{
7253 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
7254 struct timekeeper *tk = priv;
7255
7256 update_pvclock_gtod(tk);
7257
7258 /* Disable the master clock if the host does not trust, or does
b0c39dc6 7259 * not use, a TSC-based clocksource.
16e8d74d 7260 */
b0c39dc6 7261 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
16e8d74d
MT
7262 atomic_read(&kvm_guest_has_master_clock) != 0)
7263 queue_work(system_long_wq, &pvclock_gtod_work);
7264
7265 return 0;
7266}
7267
7268static struct notifier_block pvclock_gtod_notifier = {
7269 .notifier_call = pvclock_gtod_notify,
7270};
7271#endif
7272
f8c16bba 7273int kvm_arch_init(void *opaque)
043405e1 7274{
b820cc0c 7275 int r;
6b61edf7 7276 struct kvm_x86_ops *ops = opaque;
f8c16bba 7277
f8c16bba
ZX
7278 if (kvm_x86_ops) {
7279 printk(KERN_ERR "kvm: already loaded the other module\n");
56c6d28a
ZX
7280 r = -EEXIST;
7281 goto out;
f8c16bba
ZX
7282 }
7283
7284 if (!ops->cpu_has_kvm_support()) {
7285 printk(KERN_ERR "kvm: no hardware support\n");
56c6d28a
ZX
7286 r = -EOPNOTSUPP;
7287 goto out;
f8c16bba
ZX
7288 }
7289 if (ops->disabled_by_bios()) {
7290 printk(KERN_ERR "kvm: disabled by bios\n");
56c6d28a
ZX
7291 r = -EOPNOTSUPP;
7292 goto out;
f8c16bba
ZX
7293 }
7294
b666a4b6
MO
7295 /*
7296 * KVM explicitly assumes that the guest has an FPU and
7297 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the
7298 * vCPU's FPU state as a fxregs_state struct.
7299 */
7300 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
7301 printk(KERN_ERR "kvm: inadequate fpu\n");
7302 r = -EOPNOTSUPP;
7303 goto out;
7304 }
7305
013f6a5d 7306 r = -ENOMEM;
ed8e4812 7307 x86_fpu_cache = kmem_cache_create("x86_fpu", sizeof(struct fpu),
b666a4b6
MO
7308 __alignof__(struct fpu), SLAB_ACCOUNT,
7309 NULL);
7310 if (!x86_fpu_cache) {
7311 printk(KERN_ERR "kvm: failed to allocate cache for x86 fpu\n");
7312 goto out;
7313 }
7314
013f6a5d
MT
7315 shared_msrs = alloc_percpu(struct kvm_shared_msrs);
7316 if (!shared_msrs) {
7317 printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
b666a4b6 7318 goto out_free_x86_fpu_cache;
013f6a5d
MT
7319 }
7320
97db56ce
AK
7321 r = kvm_mmu_module_init();
7322 if (r)
013f6a5d 7323 goto out_free_percpu;
97db56ce 7324
f8c16bba 7325 kvm_x86_ops = ops;
920c8377 7326
7b52345e 7327 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
ffb128c8 7328 PT_DIRTY_MASK, PT64_NX_MASK, 0,
d0ec49d4 7329 PT_PRESENT_MASK, 0, sme_me_mask);
b820cc0c 7330 kvm_timer_init();
c8076604 7331
ff9d07a0
ZY
7332 perf_register_guest_info_callbacks(&kvm_guest_cbs);
7333
d366bf7e 7334 if (boot_cpu_has(X86_FEATURE_XSAVE))
2acf923e
DC
7335 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
7336
c5cc421b 7337 kvm_lapic_init();
0c5f81da
WL
7338 if (pi_inject_timer == -1)
7339 pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER);
16e8d74d
MT
7340#ifdef CONFIG_X86_64
7341 pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
0092e434 7342
5fa4ec9c 7343 if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
0092e434 7344 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
16e8d74d
MT
7345#endif
7346
f8c16bba 7347 return 0;
56c6d28a 7348
013f6a5d
MT
7349out_free_percpu:
7350 free_percpu(shared_msrs);
b666a4b6
MO
7351out_free_x86_fpu_cache:
7352 kmem_cache_destroy(x86_fpu_cache);
56c6d28a 7353out:
56c6d28a 7354 return r;
043405e1 7355}
8776e519 7356
f8c16bba
ZX
7357void kvm_arch_exit(void)
7358{
0092e434 7359#ifdef CONFIG_X86_64
5fa4ec9c 7360 if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
0092e434
VK
7361 clear_hv_tscchange_cb();
7362#endif
cef84c30 7363 kvm_lapic_exit();
ff9d07a0
ZY
7364 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
7365
888d256e
JK
7366 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
7367 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
7368 CPUFREQ_TRANSITION_NOTIFIER);
251a5fd6 7369 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
16e8d74d
MT
7370#ifdef CONFIG_X86_64
7371 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
7372#endif
f8c16bba 7373 kvm_x86_ops = NULL;
56c6d28a 7374 kvm_mmu_module_exit();
013f6a5d 7375 free_percpu(shared_msrs);
b666a4b6 7376 kmem_cache_destroy(x86_fpu_cache);
56c6d28a 7377}
f8c16bba 7378
5cb56059 7379int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
8776e519
HB
7380{
7381 ++vcpu->stat.halt_exits;
35754c98 7382 if (lapic_in_kernel(vcpu)) {
a4535290 7383 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
8776e519
HB
7384 return 1;
7385 } else {
7386 vcpu->run->exit_reason = KVM_EXIT_HLT;
7387 return 0;
7388 }
7389}
5cb56059
JS
7390EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
7391
7392int kvm_emulate_halt(struct kvm_vcpu *vcpu)
7393{
6affcbed
KH
7394 int ret = kvm_skip_emulated_instruction(vcpu);
7395 /*
7396 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
7397 * KVM_EXIT_DEBUG here.
7398 */
7399 return kvm_vcpu_halt(vcpu) && ret;
5cb56059 7400}
8776e519
HB
7401EXPORT_SYMBOL_GPL(kvm_emulate_halt);
7402
8ef81a9a 7403#ifdef CONFIG_X86_64
55dd00a7
MT
7404static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
7405 unsigned long clock_type)
7406{
7407 struct kvm_clock_pairing clock_pairing;
899a31f5 7408 struct timespec64 ts;
80fbd89c 7409 u64 cycle;
55dd00a7
MT
7410 int ret;
7411
7412 if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
7413 return -KVM_EOPNOTSUPP;
7414
7415 if (kvm_get_walltime_and_clockread(&ts, &cycle) == false)
7416 return -KVM_EOPNOTSUPP;
7417
7418 clock_pairing.sec = ts.tv_sec;
7419 clock_pairing.nsec = ts.tv_nsec;
7420 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
7421 clock_pairing.flags = 0;
bcbfbd8e 7422 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
55dd00a7
MT
7423
7424 ret = 0;
7425 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
7426 sizeof(struct kvm_clock_pairing)))
7427 ret = -KVM_EFAULT;
7428
7429 return ret;
7430}
8ef81a9a 7431#endif
55dd00a7 7432
6aef266c
SV
7433/*
7434 * kvm_pv_kick_cpu_op: Kick a vcpu.
7435 *
7436 * @apicid: APIC ID of the vCPU to be kicked.
7437 */
7438static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
7439{
24d2166b 7440 struct kvm_lapic_irq lapic_irq;
6aef266c 7441
150a84fe 7442 lapic_irq.shorthand = APIC_DEST_NOSHORT;
c96001c5 7443 lapic_irq.dest_mode = APIC_DEST_PHYSICAL;
ebd28fcb 7444 lapic_irq.level = 0;
24d2166b 7445 lapic_irq.dest_id = apicid;
93bbf0b8 7446 lapic_irq.msi_redir_hint = false;
6aef266c 7447
24d2166b 7448 lapic_irq.delivery_mode = APIC_DM_REMRD;
795a149e 7449 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
6aef266c
SV
7450}
7451
d62caabb
AS
7452void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
7453{
f7589cca
PB
7454 if (!lapic_in_kernel(vcpu)) {
7455 WARN_ON_ONCE(vcpu->arch.apicv_active);
7456 return;
7457 }
7458 if (!vcpu->arch.apicv_active)
7459 return;
7460
d62caabb
AS
7461 vcpu->arch.apicv_active = false;
7462 kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
7463}
7464
71506297
WL
7465static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
7466{
7467 struct kvm_vcpu *target = NULL;
7468 struct kvm_apic_map *map;
7469
7470 rcu_read_lock();
7471 map = rcu_dereference(kvm->arch.apic_map);
7472
7473 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
7474 target = map->phys_map[dest_id]->vcpu;
7475
7476 rcu_read_unlock();
7477
266e85a5 7478 if (target && READ_ONCE(target->ready))
71506297
WL
7479 kvm_vcpu_yield_to(target);
7480}
7481
8776e519
HB
7482int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
7483{
7484 unsigned long nr, a0, a1, a2, a3, ret;
6356ee0c 7485 int op_64_bit;
8776e519 7486
696ca779
RK
7487 if (kvm_hv_hypercall_enabled(vcpu->kvm))
7488 return kvm_hv_hypercall(vcpu);
55cd8e5a 7489
de3cd117
SC
7490 nr = kvm_rax_read(vcpu);
7491 a0 = kvm_rbx_read(vcpu);
7492 a1 = kvm_rcx_read(vcpu);
7493 a2 = kvm_rdx_read(vcpu);
7494 a3 = kvm_rsi_read(vcpu);
8776e519 7495
229456fc 7496 trace_kvm_hypercall(nr, a0, a1, a2, a3);
2714d1d3 7497
a449c7aa
NA
7498 op_64_bit = is_64_bit_mode(vcpu);
7499 if (!op_64_bit) {
8776e519
HB
7500 nr &= 0xFFFFFFFF;
7501 a0 &= 0xFFFFFFFF;
7502 a1 &= 0xFFFFFFFF;
7503 a2 &= 0xFFFFFFFF;
7504 a3 &= 0xFFFFFFFF;
7505 }
7506
07708c4a
JK
7507 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
7508 ret = -KVM_EPERM;
696ca779 7509 goto out;
07708c4a
JK
7510 }
7511
8776e519 7512 switch (nr) {
b93463aa
AK
7513 case KVM_HC_VAPIC_POLL_IRQ:
7514 ret = 0;
7515 break;
6aef266c
SV
7516 case KVM_HC_KICK_CPU:
7517 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
266e85a5 7518 kvm_sched_yield(vcpu->kvm, a1);
6aef266c
SV
7519 ret = 0;
7520 break;
8ef81a9a 7521#ifdef CONFIG_X86_64
55dd00a7
MT
7522 case KVM_HC_CLOCK_PAIRING:
7523 ret = kvm_pv_clock_pairing(vcpu, a0, a1);
7524 break;
1ed199a4 7525#endif
4180bf1b
WL
7526 case KVM_HC_SEND_IPI:
7527 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
7528 break;
71506297
WL
7529 case KVM_HC_SCHED_YIELD:
7530 kvm_sched_yield(vcpu->kvm, a0);
7531 ret = 0;
7532 break;
8776e519
HB
7533 default:
7534 ret = -KVM_ENOSYS;
7535 break;
7536 }
696ca779 7537out:
a449c7aa
NA
7538 if (!op_64_bit)
7539 ret = (u32)ret;
de3cd117 7540 kvm_rax_write(vcpu, ret);
6356ee0c 7541
f11c3a8d 7542 ++vcpu->stat.hypercalls;
6356ee0c 7543 return kvm_skip_emulated_instruction(vcpu);
8776e519
HB
7544}
7545EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
7546
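/*
 * Guest-side view of the hypercall ABI dispatched above, for context
 * (sketch, mirroring the kvm_hypercall2() pattern from the guest's
 * kvm_para.h): nr goes in RAX, arguments in RBX/RCX, the result comes
 * back in RAX. VMCALL is the Intel opcode; AMD guests use VMMCALL, and
 * Linux patches between them at runtime. KVM_HC_KICK_CPU is nr 5 in
 * the uapi header; a0 is a flags word (0) and a1 the target APIC ID.
 */
#if 0
static inline long demo_kvm_hypercall2(unsigned int nr, unsigned long p1,
				       unsigned long p2)
{
	long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

/* Wake a halted vCPU: demo_kvm_hypercall2(5, 0, apicid); 5 == KVM_HC_KICK_CPU */
#endif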
b6785def 7547static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
8776e519 7548{
d6aa1000 7549 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8776e519 7550 char instruction[3];
5fdbf976 7551 unsigned long rip = kvm_rip_read(vcpu);
8776e519 7552
8776e519 7553 kvm_x86_ops->patch_hypercall(vcpu, instruction);
8776e519 7554
ce2e852e
DV
7555 return emulator_write_emulated(ctxt, rip, instruction, 3,
7556 &ctxt->exception);
8776e519
HB
7557}
7558
851ba692 7559static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
b6c7a5dc 7560{
782d422b
MG
7561 return vcpu->run->request_interrupt_window &&
7562 likely(!pic_in_kernel(vcpu->kvm));
b6c7a5dc
HB
7563}
7564
851ba692 7565static void post_kvm_run_save(struct kvm_vcpu *vcpu)
b6c7a5dc 7566{
851ba692
AK
7567 struct kvm_run *kvm_run = vcpu->run;
7568
91586a3b 7569 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
f077825a 7570 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
2d3ad1f4 7571 kvm_run->cr8 = kvm_get_cr8(vcpu);
b6c7a5dc 7572 kvm_run->apic_base = kvm_get_apic_base(vcpu);
127a457a
MG
7573 kvm_run->ready_for_interrupt_injection =
7574 pic_in_kernel(vcpu->kvm) ||
782d422b 7575 kvm_vcpu_ready_for_interrupt_injection(vcpu);
b6c7a5dc
HB
7576}
7577
95ba8273
GN
7578static void update_cr8_intercept(struct kvm_vcpu *vcpu)
7579{
7580 int max_irr, tpr;
7581
7582 if (!kvm_x86_ops->update_cr8_intercept)
7583 return;
7584
bce87cce 7585 if (!lapic_in_kernel(vcpu))
88c808fd
AK
7586 return;
7587
d62caabb
AS
7588 if (vcpu->arch.apicv_active)
7589 return;
7590
8db3baa2
GN
7591 if (!vcpu->arch.apic->vapic_addr)
7592 max_irr = kvm_lapic_find_highest_irr(vcpu);
7593 else
7594 max_irr = -1;
95ba8273
GN
7595
7596 if (max_irr != -1)
7597 max_irr >>= 4;
7598
7599 tpr = kvm_lapic_get_cr8(vcpu);
7600
7601 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
7602}
7603
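/*
 * Worked example of the nibble math above (sketch): interrupt vectors
 * and the 8-bit task priority compare by their upper four bits, the
 * "priority class". With TPR 0x30, vector 0x31 is masked (class 3 is
 * not above 3) while vector 0x41 is delivered (class 4 > 3).
 */
#if 0
#include <stdbool.h>

static bool demo_vector_deliverable(unsigned int vector, unsigned int tpr)
{
	return (vector >> 4) > (tpr >> 4);
}
#endif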
b6b8a145 7604static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
95ba8273 7605{
b6b8a145
JK
7606 int r;
7607
95ba8273 7608 /* try to reinject previous events if any */
664f8e26 7609
1a680e35
LA
7610 if (vcpu->arch.exception.injected)
7611 kvm_x86_ops->queue_exception(vcpu);
664f8e26 7612 /*
a042c26f
LA
7613 * Do not inject an NMI or interrupt if there is a pending
7614 * exception. Exceptions and interrupts are recognized at
7615 * instruction boundaries, i.e. the start of an instruction.
7616 * Trap-like exceptions, e.g. #DB, have higher priority than
7617 * NMIs and interrupts, i.e. traps are recognized before an
7618 * NMI/interrupt that's pending on the same instruction.
7619 * Fault-like exceptions, e.g. #GP and #PF, are the lowest
7620 * priority, but are only generated (pended) during instruction
7621 * execution, i.e. a pending fault-like exception means the
7622 * fault occurred on the *previous* instruction and must be
7623 * serviced prior to recognizing any new events in order to
7624 * fully complete the previous instruction.
664f8e26 7625 */
1a680e35
LA
7626 else if (!vcpu->arch.exception.pending) {
7627 if (vcpu->arch.nmi_injected)
664f8e26 7628 kvm_x86_ops->set_nmi(vcpu);
1a680e35 7629 else if (vcpu->arch.interrupt.injected)
664f8e26 7630 kvm_x86_ops->set_irq(vcpu);
664f8e26
WL
7631 }
7632
1a680e35
LA
7633 /*
7634 * Call check_nested_events() even if we reinjected a previous event
7635 * in order for caller to determine if it should require immediate-exit
7636 * from L2 to L1 due to pending L1 events which require exit
7637 * from L2 to L1.
7638 */
664f8e26
WL
7639 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
7640 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
7641 if (r != 0)
7642 return r;
7643 }
7644
7645 /* try to inject new event if pending */
b59bb7bd 7646 if (vcpu->arch.exception.pending) {
5c1c85d0
AK
7647 trace_kvm_inj_exception(vcpu->arch.exception.nr,
7648 vcpu->arch.exception.has_error_code,
7649 vcpu->arch.exception.error_code);
d6e8c854 7650
1a680e35 7651 WARN_ON_ONCE(vcpu->arch.exception.injected);
664f8e26
WL
7652 vcpu->arch.exception.pending = false;
7653 vcpu->arch.exception.injected = true;
7654
d6e8c854
NA
7655 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
7656 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
7657 X86_EFLAGS_RF);
7658
f10c729f
JM
7659 if (vcpu->arch.exception.nr == DB_VECTOR) {
7660 /*
7661 * This code assumes that nSVM doesn't use
7662 * check_nested_events(). If it does, the
7663 * DR6/DR7 changes should happen before L1
7664 * gets a #VMEXIT for an intercepted #DB in
7665 * L2. (Under VMX, on the other hand, the
7666 * DR6/DR7 changes should not happen in the
7667 * event of a VM-exit to L1 for an intercepted
7668 * #DB in L2.)
7669 */
7670 kvm_deliver_exception_payload(vcpu);
7671 if (vcpu->arch.dr7 & DR7_GD) {
7672 vcpu->arch.dr7 &= ~DR7_GD;
7673 kvm_update_dr7(vcpu);
7674 }
6bdf0662
NA
7675 }
7676
cfcd20e5 7677 kvm_x86_ops->queue_exception(vcpu);
1a680e35
LA
7678 }
7679
7680 /* Don't consider new event if we re-injected an event */
7681 if (kvm_event_needs_reinjection(vcpu))
7682 return 0;
7683
7684 if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
7685 kvm_x86_ops->smi_allowed(vcpu)) {
c43203ca 7686 vcpu->arch.smi_pending = false;
52797bf9 7687 ++vcpu->arch.smi_count;
ee2cd4b7 7688 enter_smm(vcpu);
c43203ca 7689 } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
321c5658
YS
7690 --vcpu->arch.nmi_pending;
7691 vcpu->arch.nmi_injected = true;
7692 kvm_x86_ops->set_nmi(vcpu);
c7c9c56c 7693 } else if (kvm_cpu_has_injectable_intr(vcpu)) {
9242b5b6
BD
7694 /*
7695 * Because interrupts can be injected asynchronously, we are
7696 * calling check_nested_events again here to avoid a race condition.
7697 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
7698 * proposal and current concerns. Perhaps we should be setting
7699 * KVM_REQ_EVENT only on certain events and not unconditionally?
7700 */
7701 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
7702 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
7703 if (r != 0)
7704 return r;
7705 }
95ba8273 7706 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
66fd3f7f
GN
7707 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
7708 false);
7709 kvm_x86_ops->set_irq(vcpu);
95ba8273
GN
7710 }
7711 }
ee2cd4b7 7712
b6b8a145 7713 return 0;
95ba8273
GN
7714}
7715
7460fb4a
AK
7716static void process_nmi(struct kvm_vcpu *vcpu)
7717{
7718 unsigned limit = 2;
7719
7720 /*
7721 * x86 is limited to one NMI running, and one NMI pending after it.
7722 * If an NMI is already in progress, limit further NMIs to just one.
7723 * Otherwise, allow two (and we'll inject the first one immediately).
7724 */
7725 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
7726 limit = 1;
7727
7728 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
7729 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
7730 kvm_make_request(KVM_REQ_EVENT, vcpu);
7731}
7732
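/*
 * The clamp above in miniature (standalone sketch, hypothetical
 * values): any number of queued NMIs collapses to at most two, one
 * running plus one pending, or to one if an NMI is already in flight.
 */
#if 0
#include <stdio.h>

static unsigned int demo_nmi_pending(unsigned int pending,
				     unsigned int queued,
				     int nmi_in_progress)
{
	unsigned int limit = nmi_in_progress ? 1 : 2;

	pending += queued;
	return pending < limit ? pending : limit;
}

int main(void)
{
	printf("%u\n", demo_nmi_pending(0, 5, 0));	/* 2 */
	printf("%u\n", demo_nmi_pending(0, 5, 1));	/* 1 */
	return 0;
}
#endif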
ee2cd4b7 7733static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
660a5d51
PB
7734{
7735 u32 flags = 0;
7736 flags |= seg->g << 23;
7737 flags |= seg->db << 22;
7738 flags |= seg->l << 21;
7739 flags |= seg->avl << 20;
7740 flags |= seg->present << 15;
7741 flags |= seg->dpl << 13;
7742 flags |= seg->s << 12;
7743 flags |= seg->type << 8;
7744 return flags;
7745}
7746
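/*
 * Inverse of the packer above, for reference (sketch; struct and
 * helper names are hypothetical): recover the descriptor attributes
 * from the 32-bit segment-flags word stored in the SMRAM state-save
 * area.
 */
#if 0
#include <stdint.h>

struct demo_seg_attrs {
	unsigned int g, db, l, avl, present, dpl, s, type;
};

static struct demo_seg_attrs demo_unpack_seg_flags(uint32_t flags)
{
	struct demo_seg_attrs a = {
		.g	 = (flags >> 23) & 1,
		.db	 = (flags >> 22) & 1,
		.l	 = (flags >> 21) & 1,
		.avl	 = (flags >> 20) & 1,
		.present = (flags >> 15) & 1,
		.dpl	 = (flags >> 13) & 3,
		.s	 = (flags >> 12) & 1,
		.type	 = (flags >> 8) & 0xf,
	};

	return a;
}
#endif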
ee2cd4b7 7747static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
660a5d51
PB
7748{
7749 struct kvm_segment seg;
7750 int offset;
7751
7752 kvm_get_segment(vcpu, &seg, n);
7753 put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
7754
7755 if (n < 3)
7756 offset = 0x7f84 + n * 12;
7757 else
7758 offset = 0x7f2c + (n - 3) * 12;
7759
7760 put_smstate(u32, buf, offset + 8, seg.base);
7761 put_smstate(u32, buf, offset + 4, seg.limit);
ee2cd4b7 7762 put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
660a5d51
PB
7763}
7764
efbb288a 7765#ifdef CONFIG_X86_64
ee2cd4b7 7766static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
660a5d51
PB
7767{
7768 struct kvm_segment seg;
7769 int offset;
7770 u16 flags;
7771
7772 kvm_get_segment(vcpu, &seg, n);
7773 offset = 0x7e00 + n * 16;
7774
ee2cd4b7 7775 flags = enter_smm_get_segment_flags(&seg) >> 8;
660a5d51
PB
7776 put_smstate(u16, buf, offset, seg.selector);
7777 put_smstate(u16, buf, offset + 2, flags);
7778 put_smstate(u32, buf, offset + 4, seg.limit);
7779 put_smstate(u64, buf, offset + 8, seg.base);
7780}
efbb288a 7781#endif
660a5d51 7782
ee2cd4b7 7783static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
660a5d51
PB
7784{
7785 struct desc_ptr dt;
7786 struct kvm_segment seg;
7787 unsigned long val;
7788 int i;
7789
7790 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
7791 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
7792 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
7793 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
7794
7795 for (i = 0; i < 8; i++)
7796 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
7797
7798 kvm_get_dr(vcpu, 6, &val);
7799 put_smstate(u32, buf, 0x7fcc, (u32)val);
7800 kvm_get_dr(vcpu, 7, &val);
7801 put_smstate(u32, buf, 0x7fc8, (u32)val);
7802
7803 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
7804 put_smstate(u32, buf, 0x7fc4, seg.selector);
7805 put_smstate(u32, buf, 0x7f64, seg.base);
7806 put_smstate(u32, buf, 0x7f60, seg.limit);
ee2cd4b7 7807 put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
660a5d51
PB
7808
7809 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
7810 put_smstate(u32, buf, 0x7fc0, seg.selector);
7811 put_smstate(u32, buf, 0x7f80, seg.base);
7812 put_smstate(u32, buf, 0x7f7c, seg.limit);
ee2cd4b7 7813 put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
660a5d51
PB
7814
7815 kvm_x86_ops->get_gdt(vcpu, &dt);
7816 put_smstate(u32, buf, 0x7f74, dt.address);
7817 put_smstate(u32, buf, 0x7f70, dt.size);
7818
7819 kvm_x86_ops->get_idt(vcpu, &dt);
7820 put_smstate(u32, buf, 0x7f58, dt.address);
7821 put_smstate(u32, buf, 0x7f54, dt.size);
7822
7823 for (i = 0; i < 6; i++)
ee2cd4b7 7824 enter_smm_save_seg_32(vcpu, buf, i);
660a5d51
PB
7825
7826 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
7827
7828 /* revision id */
7829 put_smstate(u32, buf, 0x7efc, 0x00020000);
7830 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
7831}
7832
b68f3cc7 7833#ifdef CONFIG_X86_64
ee2cd4b7 7834static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
660a5d51 7835{
660a5d51
PB
7836 struct desc_ptr dt;
7837 struct kvm_segment seg;
7838 unsigned long val;
7839 int i;
7840
7841 for (i = 0; i < 16; i++)
7842 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
7843
7844 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
7845 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
7846
7847 kvm_get_dr(vcpu, 6, &val);
7848 put_smstate(u64, buf, 0x7f68, val);
7849 kvm_get_dr(vcpu, 7, &val);
7850 put_smstate(u64, buf, 0x7f60, val);
7851
7852 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
7853 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
7854 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
7855
7856 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
7857
7858 /* revision id */
7859 put_smstate(u32, buf, 0x7efc, 0x00020064);
7860
7861 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
7862
7863 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
7864 put_smstate(u16, buf, 0x7e90, seg.selector);
ee2cd4b7 7865 put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
660a5d51
PB
7866 put_smstate(u32, buf, 0x7e94, seg.limit);
7867 put_smstate(u64, buf, 0x7e98, seg.base);
7868
7869 kvm_x86_ops->get_idt(vcpu, &dt);
7870 put_smstate(u32, buf, 0x7e84, dt.size);
7871 put_smstate(u64, buf, 0x7e88, dt.address);
7872
7873 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
7874 put_smstate(u16, buf, 0x7e70, seg.selector);
ee2cd4b7 7875 put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
660a5d51
PB
7876 put_smstate(u32, buf, 0x7e74, seg.limit);
7877 put_smstate(u64, buf, 0x7e78, seg.base);
7878
7879 kvm_x86_ops->get_gdt(vcpu, &dt);
7880 put_smstate(u32, buf, 0x7e64, dt.size);
7881 put_smstate(u64, buf, 0x7e68, dt.address);
7882
7883 for (i = 0; i < 6; i++)
ee2cd4b7 7884 enter_smm_save_seg_64(vcpu, buf, i);
660a5d51 7885}
b68f3cc7 7886#endif
660a5d51 7887
ee2cd4b7 7888static void enter_smm(struct kvm_vcpu *vcpu)
64d60670 7889{
660a5d51 7890 struct kvm_segment cs, ds;
18c3626e 7891 struct desc_ptr dt;
660a5d51
PB
7892 char buf[512];
7893 u32 cr0;
7894
660a5d51 7895 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
660a5d51 7896 memset(buf, 0, 512);
b68f3cc7 7897#ifdef CONFIG_X86_64
d6321d49 7898 if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
ee2cd4b7 7899 enter_smm_save_state_64(vcpu, buf);
660a5d51 7900 else
b68f3cc7 7901#endif
ee2cd4b7 7902 enter_smm_save_state_32(vcpu, buf);
660a5d51 7903
0234bf88
LP
7904 /*
7905 * Give pre_enter_smm() a chance to make ISA-specific changes to the
7906 * vCPU state (e.g. leave guest mode) after we've saved the state into
7907 * the SMM state-save area.
7908 */
7909 kvm_x86_ops->pre_enter_smm(vcpu, buf);
7910
7911 vcpu->arch.hflags |= HF_SMM_MASK;
54bf36aa 7912 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
660a5d51
PB
7913
7914 if (kvm_x86_ops->get_nmi_mask(vcpu))
7915 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
7916 else
7917 kvm_x86_ops->set_nmi_mask(vcpu, true);
7918
7919 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
7920 kvm_rip_write(vcpu, 0x8000);
7921
7922 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
7923 kvm_x86_ops->set_cr0(vcpu, cr0);
7924 vcpu->arch.cr0 = cr0;
7925
7926 kvm_x86_ops->set_cr4(vcpu, 0);
7927
18c3626e
PB
7928 /* Undocumented: IDT limit is set to zero on entry to SMM. */
7929 dt.address = dt.size = 0;
7930 kvm_x86_ops->set_idt(vcpu, &dt);
7931
660a5d51
PB
7932 __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
7933
7934 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
7935 cs.base = vcpu->arch.smbase;
7936
7937 ds.selector = 0;
7938 ds.base = 0;
7939
7940 cs.limit = ds.limit = 0xffffffff;
7941 cs.type = ds.type = 0x3;
7942 cs.dpl = ds.dpl = 0;
7943 cs.db = ds.db = 0;
7944 cs.s = ds.s = 1;
7945 cs.l = ds.l = 0;
7946 cs.g = ds.g = 1;
7947 cs.avl = ds.avl = 0;
7948 cs.present = ds.present = 1;
7949 cs.unusable = ds.unusable = 0;
7950 cs.padding = ds.padding = 0;
7951
7952 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
7953 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
7954 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
7955 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
7956 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
7957 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
7958
b68f3cc7 7959#ifdef CONFIG_X86_64
d6321d49 7960 if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
660a5d51 7961 kvm_x86_ops->set_efer(vcpu, 0);
b68f3cc7 7962#endif
660a5d51
PB
7963
7964 kvm_update_cpuid(vcpu);
7965 kvm_mmu_reset_context(vcpu);
64d60670
PB
7966}
7967
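/*
 * The SMM entry-point arithmetic from enter_smm() above, worked
 * through with the architectural default SMBASE of 0x30000 (sketch
 * only): CS gets selector SMBASE >> 4 with base SMBASE, and RIP is
 * 0x8000, so execution resumes at SMBASE + 0x8000 = 0x38000.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t smbase = 0x30000;
	uint16_t cs_sel = (smbase >> 4) & 0xffff;	/* 0x3000 */
	uint64_t rip = 0x8000;

	printf("cs %x, entry %llx\n", cs_sel,
	       (unsigned long long)(smbase + rip));	/* 0x38000 */
	return 0;
}
#endif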
ee2cd4b7 7968static void process_smi(struct kvm_vcpu *vcpu)
c43203ca
PB
7969{
7970 vcpu->arch.smi_pending = true;
7971 kvm_make_request(KVM_REQ_EVENT, vcpu);
7972}
7973
7ee30bc1
NNL
7974void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
7975 unsigned long *vcpu_bitmap)
7976{
7977 cpumask_var_t cpus;
7ee30bc1
NNL
7978
7979 zalloc_cpumask_var(&cpus, GFP_ATOMIC);
7980
db5a95ec
MW
7981 kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC,
7982 vcpu_bitmap, cpus);
7ee30bc1
NNL
7983
7984 free_cpumask_var(cpus);
7985}
7986
2860c4b1
PB
7987void kvm_make_scan_ioapic_request(struct kvm *kvm)
7988{
7989 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
7990}
7991
3d81bc7e 7992static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
c7c9c56c 7993{
dcbd3e49 7994 if (!kvm_apic_present(vcpu))
3d81bc7e 7995 return;
c7c9c56c 7996
6308630b 7997 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
c7c9c56c 7998
b053b2ae 7999 if (irqchip_split(vcpu->kvm))
6308630b 8000 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
db2bdcbb 8001 else {
fa59cc00 8002 if (vcpu->arch.apicv_active)
d62caabb 8003 kvm_x86_ops->sync_pir_to_irr(vcpu);
e97f852f
WL
8004 if (ioapic_in_kernel(vcpu->kvm))
8005 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
db2bdcbb 8006 }
e40ff1d6
LA
8007
8008 if (is_guest_mode(vcpu))
8009 vcpu->arch.load_eoi_exitmap_pending = true;
8010 else
8011 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
8012}
8013
8014static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
8015{
8016 u64 eoi_exit_bitmap[4];
8017
8018 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
8019 return;
8020
5c919412
AS
8021 bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
8022 vcpu_to_synic(vcpu)->vec_bitmap, 256);
8023 kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
c7c9c56c
YZ
8024}
8025
93065ac7
MH
8026int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
8027 unsigned long start, unsigned long end,
8028 bool blockable)
b1394e74
RK
8029{
8030 unsigned long apic_address;
8031
8032 /*
8033 * The physical address of apic access page is stored in the VMCS.
8034 * Update it when it becomes invalid.
8035 */
8036 apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
8037 if (start <= apic_address && apic_address < end)
8038 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
93065ac7
MH
8039
8040 return 0;
b1394e74
RK
8041}
8042
4256f43f
TC
8043void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
8044{
c24ae0dc
TC
8045 struct page *page = NULL;
8046
35754c98 8047 if (!lapic_in_kernel(vcpu))
f439ed27
PB
8048 return;
8049
4256f43f
TC
8050 if (!kvm_x86_ops->set_apic_access_page_addr)
8051 return;
8052
c24ae0dc 8053 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
e8fd5e9e
AA
8054 if (is_error_page(page))
8055 return;
c24ae0dc
TC
8056 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
8057
8058 /*
8059 * Do not pin apic access page in memory, the MMU notifier
8060 * will call us again if it is migrated or swapped out.
8061 */
8062 put_page(page);
4256f43f 8063}
4256f43f 8064
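/*
 * The gfn computed above, spelled out (sketch): the default APIC MMIO
 * base 0xfee00000 shifted by the 4 KiB page order gives guest frame
 * number 0xfee00, the page whose host mapping is handed to the VMCS.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t apic_default_phys_base = 0xfee00000ULL;
	unsigned int page_shift = 12;	/* 4 KiB pages */

	printf("gfn %llx\n", (unsigned long long)
	       (apic_default_phys_base >> page_shift));	/* 0xfee00 */
	return 0;
}
#endif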
d264ee0c
SC
8065void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
8066{
8067 smp_send_reschedule(vcpu->cpu);
8068}
8069EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
8070
9357d939 8071/*
362c698f 8072 * Returns 1 to let vcpu_run() continue the guest execution loop without
9357d939
TY
8073 * exiting to userspace. Otherwise, the value will be returned to
8074 * userspace.
8075 */
851ba692 8076static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
b6c7a5dc
HB
8077{
8078 int r;
62a193ed
MG
8079 bool req_int_win =
8080 dm_request_for_irq_injection(vcpu) &&
8081 kvm_cpu_accept_dm_intr(vcpu);
1e9e2622 8082 enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
62a193ed 8083
730dca42 8084 bool req_immediate_exit = false;
b6c7a5dc 8085
2fa6e1e1 8086 if (kvm_request_pending(vcpu)) {
671ddc70
JM
8087 if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
8088 if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
8089 r = 0;
8090 goto out;
8091 }
8092 }
a8eeb04a 8093 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
2e53d63a 8094 kvm_mmu_unload(vcpu);
a8eeb04a 8095 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
2f599714 8096 __kvm_migrate_timers(vcpu);
d828199e
MT
8097 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
8098 kvm_gen_update_masterclock(vcpu->kvm);
0061d53d
MT
8099 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
8100 kvm_gen_kvmclock_update(vcpu);
34c238a1
ZA
8101 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
8102 r = kvm_guest_time_update(vcpu);
8cfdc000
ZA
8103 if (unlikely(r))
8104 goto out;
8105 }
a8eeb04a 8106 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
4731d4c7 8107 kvm_mmu_sync_roots(vcpu);
6e42782f
JS
8108 if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu))
8109 kvm_mmu_load_cr3(vcpu);
a8eeb04a 8110 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
c2ba05cc 8111 kvm_vcpu_flush_tlb(vcpu, true);
a8eeb04a 8112 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
851ba692 8113 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
b93463aa
AK
8114 r = 0;
8115 goto out;
8116 }
a8eeb04a 8117 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
851ba692 8118 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
bbeac283 8119 vcpu->mmio_needed = 0;
71c4dfaf
JR
8120 r = 0;
8121 goto out;
8122 }
af585b92
GN
8123 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
8124 /* Page is swapped out. Do synthetic halt */
8125 vcpu->arch.apf.halted = true;
8126 r = 1;
8127 goto out;
8128 }
c9aaa895
GC
8129 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
8130 record_steal_time(vcpu);
64d60670
PB
8131 if (kvm_check_request(KVM_REQ_SMI, vcpu))
8132 process_smi(vcpu);
7460fb4a
AK
8133 if (kvm_check_request(KVM_REQ_NMI, vcpu))
8134 process_nmi(vcpu);
f5132b01 8135 if (kvm_check_request(KVM_REQ_PMU, vcpu))
c6702c9d 8136 kvm_pmu_handle_event(vcpu);
f5132b01 8137 if (kvm_check_request(KVM_REQ_PMI, vcpu))
c6702c9d 8138 kvm_pmu_deliver_pmi(vcpu);
7543a635
SR
8139 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
8140 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
8141 if (test_bit(vcpu->arch.pending_ioapic_eoi,
6308630b 8142 vcpu->arch.ioapic_handled_vectors)) {
7543a635
SR
8143 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
8144 vcpu->run->eoi.vector =
8145 vcpu->arch.pending_ioapic_eoi;
8146 r = 0;
8147 goto out;
8148 }
8149 }
3d81bc7e
YZ
8150 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
8151 vcpu_scan_ioapic(vcpu);
e40ff1d6
LA
8152 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
8153 vcpu_load_eoi_exitmap(vcpu);
4256f43f
TC
8154 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
8155 kvm_vcpu_reload_apic_access_page(vcpu);
2ce79189
AS
8156 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
8157 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
8158 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
8159 r = 0;
8160 goto out;
8161 }
e516cebb
AS
8162 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
8163 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
8164 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
8165 r = 0;
8166 goto out;
8167 }
db397571
AS
8168 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
8169 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
8170 vcpu->run->hyperv = vcpu->arch.hyperv.exit;
8171 r = 0;
8172 goto out;
8173 }
f3b138c5
AS
8174
8175 /*
8176 * KVM_REQ_HV_STIMER has to be processed after
8177 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
8178 * depend on the guest clock being up-to-date
8179 */
1f4b34f8
AS
8180 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
8181 kvm_hv_process_stimers(vcpu);
2f52d58c 8182 }
b93463aa 8183
b463a6f7 8184 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
0f1e261e 8185 ++vcpu->stat.req_event;
66450a21
JK
8186 kvm_apic_accept_events(vcpu);
8187 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
8188 r = 1;
8189 goto out;
8190 }
8191
b6b8a145
JK
8192 if (inject_pending_event(vcpu, req_int_win) != 0)
8193 req_immediate_exit = true;
321c5658 8194 else {
cc3d967f 8195 /* Enable SMI/NMI/IRQ window open exits if needed.
c43203ca 8196 *
cc3d967f
LP
8197 * SMIs have three cases:
8198 * 1) They can be nested, and then there is nothing to
8199 * do here because RSM will cause a vmexit anyway.
8200 * 2) There is an ISA-specific reason why SMI cannot be
8201 * injected, and the moment when this changes can be
8202 * intercepted.
8203 * 3) Or the SMI can be pending because
8204 * inject_pending_event has completed the injection
8205 * of an IRQ or NMI from the previous vmexit, and
8206 * then we request an immediate exit to inject the
8207 * SMI.
c43203ca
PB
8208 */
8209 if (vcpu->arch.smi_pending && !is_smm(vcpu))
cc3d967f
LP
8210 if (!kvm_x86_ops->enable_smi_window(vcpu))
8211 req_immediate_exit = true;
321c5658
YS
8212 if (vcpu->arch.nmi_pending)
8213 kvm_x86_ops->enable_nmi_window(vcpu);
8214 if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
8215 kvm_x86_ops->enable_irq_window(vcpu);
664f8e26 8216 WARN_ON(vcpu->arch.exception.pending);
321c5658 8217 }
b463a6f7
AK
8218
8219 if (kvm_lapic_enabled(vcpu)) {
8220 update_cr8_intercept(vcpu);
8221 kvm_lapic_sync_to_vapic(vcpu);
8222 }
8223 }
8224
d8368af8
AK
8225 r = kvm_mmu_reload(vcpu);
8226 if (unlikely(r))
d905c069 8227 goto cancel_injection;
d8368af8
AK
8229
b6c7a5dc
HB
8230 preempt_disable();
8231
8232 kvm_x86_ops->prepare_guest_switch(vcpu);
b95234c8
PB
8233
8234 /*
8235 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
8236 * IPI are then delayed after guest entry, which ensures that they
8237 * result in virtual interrupt delivery.
8238 */
8239 local_irq_disable();
6b7e2d09
XG
8240 vcpu->mode = IN_GUEST_MODE;
8241
01b71917
MT
8242 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
8243
0f127d12 8244 /*
b95234c8 8245 * 1) We should set ->mode before checking ->requests. Please see
cde9af6e 8246 * the comment in kvm_vcpu_exiting_guest_mode().
b95234c8 8247 *
81b01667 8248 * 2) For APICv, we should set ->mode before checking PID.ON. This
b95234c8
PB
8249 * pairs with the memory barrier implicit in pi_test_and_set_on
8250 * (see vmx_deliver_posted_interrupt).
8251 *
8252 * 3) This also orders the write to mode from any reads to the page
8253 * tables done while the VCPU is running. Please see the comment
8254 * in kvm_flush_remote_tlbs.
6b7e2d09 8255 */
01b71917 8256 smp_mb__after_srcu_read_unlock();
b6c7a5dc 8257
b95234c8
PB
8258 /*
8259 * This handles the case where a posted interrupt was
8260 * notified with kvm_vcpu_kick.
8261 */
fa59cc00
LA
8262 if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
8263 kvm_x86_ops->sync_pir_to_irr(vcpu);
32f88400 8264
2fa6e1e1 8265 if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
d94e1dc9 8266 || need_resched() || signal_pending(current)) {
6b7e2d09 8267 vcpu->mode = OUTSIDE_GUEST_MODE;
d94e1dc9 8268 smp_wmb();
6c142801
AK
8269 local_irq_enable();
8270 preempt_enable();
01b71917 8271 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6c142801 8272 r = 1;
d905c069 8273 goto cancel_injection;
6c142801
AK
8274 }
8275
c43203ca
PB
8276 if (req_immediate_exit) {
8277 kvm_make_request(KVM_REQ_EVENT, vcpu);
d264ee0c 8278 kvm_x86_ops->request_immediate_exit(vcpu);
c43203ca 8279 }
d6185f20 8280
8b89fe1f 8281 trace_kvm_entry(vcpu->vcpu_id);
6edaa530 8282 guest_enter_irqoff();
b6c7a5dc 8283
2620fe26
SC
8284 fpregs_assert_state_consistent();
8285 if (test_thread_flag(TIF_NEED_FPU_LOAD))
8286 switch_fpu_return();
5f409e20 8287
42dbaa5a 8288 if (unlikely(vcpu->arch.switch_db_regs)) {
42dbaa5a
JK
8289 set_debugreg(0, 7);
8290 set_debugreg(vcpu->arch.eff_db[0], 0);
8291 set_debugreg(vcpu->arch.eff_db[1], 1);
8292 set_debugreg(vcpu->arch.eff_db[2], 2);
8293 set_debugreg(vcpu->arch.eff_db[3], 3);
c77fb5fe 8294 set_debugreg(vcpu->arch.dr6, 6);
ae561ede 8295 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
42dbaa5a 8296 }
b6c7a5dc 8297
851ba692 8298 kvm_x86_ops->run(vcpu);
b6c7a5dc 8299
c77fb5fe
PB
8300 /*
8301 * Do this here before restoring debug registers on the host. And
8302 * since we do this before handling the vmexit, a DR access vmexit
8303 * can (a) read the correct value of the debug registers, (b) set
8304 * KVM_DEBUGREG_WONT_EXIT again.
8305 */
8306 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
c77fb5fe
PB
8307 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
8308 kvm_x86_ops->sync_dirty_debug_regs(vcpu);
70e4da7a
PB
8309 kvm_update_dr0123(vcpu);
8310 kvm_update_dr6(vcpu);
8311 kvm_update_dr7(vcpu);
8312 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
c77fb5fe
PB
8313 }
8314
24f1e32c
FW
8315 /*
8316 * If the guest has used debug registers, at least dr7
8317 * will be disabled while returning to the host.
8318 * If we don't have active breakpoints in the host, we don't
8319 * care about the messed up debug address registers. But if
8320 * we have some of them active, restore the old state.
8321 */
59d8eb53 8322 if (hw_breakpoint_active())
24f1e32c 8323 hw_breakpoint_restore();
42dbaa5a 8324
4ba76538 8325 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1d5f066e 8326
6b7e2d09 8327 vcpu->mode = OUTSIDE_GUEST_MODE;
d94e1dc9 8328 smp_wmb();
a547c6db 8329
1e9e2622 8330 kvm_x86_ops->handle_exit_irqoff(vcpu, &exit_fastpath);
b6c7a5dc 8331
d7a08882
SC
8332 /*
8333 * Consume any pending interrupts, including the possible source of
8334 * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
8335 * An instruction is required after local_irq_enable() to fully unblock
8336 * interrupts on processors that implement an interrupt shadow; the
8337 * stat.exits increment will do nicely.
8338 */
8339 kvm_before_interrupt(vcpu);
8340 local_irq_enable();
b6c7a5dc 8341 ++vcpu->stat.exits;
d7a08882
SC
8342 local_irq_disable();
8343 kvm_after_interrupt(vcpu);
b6c7a5dc 8344
f2485b3e 8345 guest_exit_irqoff();
ec0671d5
WL
8346 if (lapic_in_kernel(vcpu)) {
8347 s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
8348 if (delta != S64_MIN) {
8349 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta);
8350 vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN;
8351 }
8352 }
b6c7a5dc 8353
f2485b3e 8354 local_irq_enable();
b6c7a5dc
HB
8355 preempt_enable();
8356
f656ce01 8357 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3200f405 8358
b6c7a5dc
HB
8359 /*
8360 * Profile KVM exit RIPs:
8361 */
8362 if (unlikely(prof_on == KVM_PROFILING)) {
5fdbf976
MT
8363 unsigned long rip = kvm_rip_read(vcpu);
8364 profile_hit(KVM_PROFILING, (void *)rip);
b6c7a5dc
HB
8365 }
8366
cc578287
ZA
8367 if (unlikely(vcpu->arch.tsc_always_catchup))
8368 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
298101da 8369
5cfb1d5a
MT
8370 if (vcpu->arch.apic_attention)
8371 kvm_lapic_sync_from_vapic(vcpu);
b93463aa 8372
618232e2 8373 vcpu->arch.gpa_available = false;
1e9e2622 8374 r = kvm_x86_ops->handle_exit(vcpu, exit_fastpath);
d905c069
MT
8375 return r;
8376
8377cancel_injection:
8378 kvm_x86_ops->cancel_injection(vcpu);
ae7a2a3f
MT
8379 if (unlikely(vcpu->arch.apic_attention))
8380 kvm_lapic_sync_from_vapic(vcpu);
d7690175
MT
8381out:
8382 return r;
8383}
b6c7a5dc 8384
362c698f
PB
8385static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
8386{
bf9f6ac8
FW
8387 if (!kvm_arch_vcpu_runnable(vcpu) &&
8388 (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
9c8fd1ba
PB
8389 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
8390 kvm_vcpu_block(vcpu);
8391 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
bf9f6ac8
FW
8392
8393 if (kvm_x86_ops->post_block)
8394 kvm_x86_ops->post_block(vcpu);
8395
9c8fd1ba
PB
8396 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
8397 return 1;
8398 }
362c698f
PB
8399
8400 kvm_apic_accept_events(vcpu);
 8401 switch (vcpu->arch.mp_state) {
8402 case KVM_MP_STATE_HALTED:
8403 vcpu->arch.pv.pv_unhalted = false;
8404 vcpu->arch.mp_state =
8405 KVM_MP_STATE_RUNNABLE;
b2869f28 8406 /* fall through */
362c698f
PB
8407 case KVM_MP_STATE_RUNNABLE:
8408 vcpu->arch.apf.halted = false;
8409 break;
8410 case KVM_MP_STATE_INIT_RECEIVED:
8411 break;
8412 default:
8413 return -EINTR;
8415 }
8416 return 1;
8417}
09cec754 8418
5d9bc648
PB
8419static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
8420{
0ad3bed6
PB
8421 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
8422 kvm_x86_ops->check_nested_events(vcpu, false);
8423
5d9bc648
PB
8424 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
8425 !vcpu->arch.apf.halted);
8426}
8427
362c698f 8428static int vcpu_run(struct kvm_vcpu *vcpu)
d7690175
MT
8429{
8430 int r;
f656ce01 8431 struct kvm *kvm = vcpu->kvm;
d7690175 8432
f656ce01 8433 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
c595ceee 8434 vcpu->arch.l1tf_flush_l1d = true;
d7690175 8435
362c698f 8436 for (;;) {
58f800d5 8437 if (kvm_vcpu_running(vcpu)) {
851ba692 8438 r = vcpu_enter_guest(vcpu);
bf9f6ac8 8439 } else {
362c698f 8440 r = vcpu_block(kvm, vcpu);
bf9f6ac8
FW
8441 }
8442
09cec754
GN
8443 if (r <= 0)
8444 break;
8445
72875d8a 8446 kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
09cec754
GN
8447 if (kvm_cpu_has_pending_timer(vcpu))
8448 kvm_inject_pending_timer_irqs(vcpu);
8449
782d422b
MG
8450 if (dm_request_for_irq_injection(vcpu) &&
8451 kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
4ca7dd8c
PB
8452 r = 0;
8453 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
09cec754 8454 ++vcpu->stat.request_irq_exits;
362c698f 8455 break;
09cec754 8456 }
af585b92
GN
8457
8458 kvm_check_async_pf_completion(vcpu);
8459
09cec754
GN
8460 if (signal_pending(current)) {
8461 r = -EINTR;
851ba692 8462 vcpu->run->exit_reason = KVM_EXIT_INTR;
09cec754 8463 ++vcpu->stat.signal_exits;
362c698f 8464 break;
09cec754
GN
8465 }
8466 if (need_resched()) {
f656ce01 8467 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
c08ac06a 8468 cond_resched();
f656ce01 8469 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
d7690175 8470 }
b6c7a5dc
HB
8471 }
8472
f656ce01 8473 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
b6c7a5dc
HB
8474
8475 return r;
8476}
8477
716d51ab
GN
8478static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
8479{
8480 int r;
60fc3d02 8481
716d51ab 8482 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
0ce97a2b 8483 r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
716d51ab 8484 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
60fc3d02 8485 return r;
716d51ab
GN
8486}
8487
8488static int complete_emulated_pio(struct kvm_vcpu *vcpu)
8489{
8490 BUG_ON(!vcpu->arch.pio.count);
8491
8492 return complete_emulated_io(vcpu);
8493}
8494
f78146b0
AK
8495/*
8496 * Implements the following, as a state machine:
8497 *
8498 * read:
8499 * for each fragment
87da7e66
XG
8500 * for each mmio piece in the fragment
8501 * write gpa, len
8502 * exit
8503 * copy data
f78146b0
AK
8504 * execute insn
8505 *
8506 * write:
8507 * for each fragment
87da7e66
XG
8508 * for each mmio piece in the fragment
8509 * write gpa, len
8510 * copy data
8511 * exit
f78146b0 8512 */
716d51ab 8513static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
5287f194
AK
8514{
8515 struct kvm_run *run = vcpu->run;
f78146b0 8516 struct kvm_mmio_fragment *frag;
87da7e66 8517 unsigned len;
5287f194 8518
716d51ab 8519 BUG_ON(!vcpu->mmio_needed);
5287f194 8520
716d51ab 8521 /* Complete previous fragment */
87da7e66
XG
8522 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
8523 len = min(8u, frag->len);
716d51ab 8524 if (!vcpu->mmio_is_write)
87da7e66
XG
8525 memcpy(frag->data, run->mmio.data, len);
8526
8527 if (frag->len <= 8) {
8528 /* Switch to the next fragment. */
8529 frag++;
8530 vcpu->mmio_cur_fragment++;
8531 } else {
8532 /* Go forward to the next mmio piece. */
8533 frag->data += len;
8534 frag->gpa += len;
8535 frag->len -= len;
8536 }
8537
a08d3b3b 8538 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
716d51ab 8539 vcpu->mmio_needed = 0;
0912c977
PB
8540
8541 /* FIXME: return into emulator if single-stepping. */
cef4dea0 8542 if (vcpu->mmio_is_write)
716d51ab
GN
8543 return 1;
8544 vcpu->mmio_read_completed = 1;
8545 return complete_emulated_io(vcpu);
8546 }
87da7e66 8547
716d51ab
GN
8548 run->exit_reason = KVM_EXIT_MMIO;
8549 run->mmio.phys_addr = frag->gpa;
8550 if (vcpu->mmio_is_write)
87da7e66
XG
8551 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
8552 run->mmio.len = min(8u, frag->len);
716d51ab
GN
8553 run->mmio.is_write = vcpu->mmio_is_write;
8554 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
8555 return 0;
5287f194
AK
8556}
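
The userspace half of this state machine is the VMM's KVM_RUN loop: each KVM_EXIT_MMIO exposes one piece of at most 8 bytes, and re-entering the vcpu drives complete_emulated_mmio() forward through the fragments. A minimal sketch in C, assuming an already-created vcpu_fd, an mmap()ed struct kvm_run, and hypothetical device_read()/device_write() helpers from the device model:

/* Illustrative userspace sketch, not part of x86.c. */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Hypothetical device-model helpers supplied by the VMM. */
extern void device_read(uint64_t gpa, void *data, unsigned int len);
extern void device_write(uint64_t gpa, const void *data, unsigned int len);

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			/*
			 * One mmio piece (<= 8 bytes) per exit; the next
			 * KVM_RUN resumes in complete_emulated_mmio(),
			 * which advances the fragment cursor.
			 */
			if (run->mmio.is_write)
				device_write(run->mmio.phys_addr,
					     run->mmio.data, run->mmio.len);
			else
				device_read(run->mmio.phys_addr,
					     run->mmio.data, run->mmio.len);
			break;
		default:
			return;	/* hand anything else back to the caller */
		}
	}
}
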
8557
c9aef3b8
SC
8558static void kvm_save_current_fpu(struct fpu *fpu)
8559{
8560 /*
8561 * If the target FPU state is not resident in the CPU registers, just
8562 * memcpy() from current, else save CPU state directly to the target.
8563 */
8564 if (test_thread_flag(TIF_NEED_FPU_LOAD))
8565 memcpy(&fpu->state, &current->thread.fpu.state,
8566 fpu_kernel_xstate_size);
8567 else
8568 copy_fpregs_to_fpstate(fpu);
8569}
8570
822f312d
SAS
8571/* Swap (qemu) user FPU context for the guest FPU context. */
8572static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
8573{
5f409e20
RR
8574 fpregs_lock();
8575
c9aef3b8
SC
8576 kvm_save_current_fpu(vcpu->arch.user_fpu);
8577
822f312d 8578 /* PKRU is separately restored in kvm_x86_ops->run. */
b666a4b6 8579 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
822f312d 8580 ~XFEATURE_MASK_PKRU);
5f409e20
RR
8581
8582 fpregs_mark_activate();
8583 fpregs_unlock();
8584
822f312d
SAS
8585 trace_kvm_fpu(1);
8586}
8587
8588/* When vcpu_run ends, restore user space FPU context. */
8589static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
8590{
5f409e20
RR
8591 fpregs_lock();
8592
c9aef3b8
SC
8593 kvm_save_current_fpu(vcpu->arch.guest_fpu);
8594
d9a710e5 8595 copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
5f409e20
RR
8596
8597 fpregs_mark_activate();
8598 fpregs_unlock();
8599
822f312d
SAS
8600 ++vcpu->stat.fpu_reload;
8601 trace_kvm_fpu(0);
8602}
8603
b6c7a5dc
HB
8604int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
8605{
8606 int r;
b6c7a5dc 8607
accb757d 8608 vcpu_load(vcpu);
20b7035c 8609 kvm_sigset_activate(vcpu);
5663d8f9
PX
8610 kvm_load_guest_fpu(vcpu);
8611
a4535290 8612 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
2f173d26
JS
8613 if (kvm_run->immediate_exit) {
8614 r = -EINTR;
8615 goto out;
8616 }
b6c7a5dc 8617 kvm_vcpu_block(vcpu);
66450a21 8618 kvm_apic_accept_events(vcpu);
72875d8a 8619 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
ac9f6dc0 8620 r = -EAGAIN;
a0595000
JS
8621 if (signal_pending(current)) {
8622 r = -EINTR;
8623 vcpu->run->exit_reason = KVM_EXIT_INTR;
8624 ++vcpu->stat.signal_exits;
8625 }
ac9f6dc0 8626 goto out;
b6c7a5dc
HB
8627 }
8628
01643c51
KH
8629 if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
8630 r = -EINVAL;
8631 goto out;
8632 }
8633
8634 if (vcpu->run->kvm_dirty_regs) {
8635 r = sync_regs(vcpu);
8636 if (r != 0)
8637 goto out;
8638 }
8639
b6c7a5dc 8640 /* re-sync apic's tpr */
35754c98 8641 if (!lapic_in_kernel(vcpu)) {
eea1cff9
AP
8642 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
8643 r = -EINVAL;
8644 goto out;
8645 }
8646 }
b6c7a5dc 8647
716d51ab
GN
8648 if (unlikely(vcpu->arch.complete_userspace_io)) {
8649 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
8650 vcpu->arch.complete_userspace_io = NULL;
8651 r = cui(vcpu);
8652 if (r <= 0)
5663d8f9 8653 goto out;
716d51ab
GN
8654 } else
8655 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
5287f194 8656
460df4c1
PB
8657 if (kvm_run->immediate_exit)
8658 r = -EINTR;
8659 else
8660 r = vcpu_run(vcpu);
b6c7a5dc
HB
8661
8662out:
5663d8f9 8663 kvm_put_guest_fpu(vcpu);
01643c51
KH
8664 if (vcpu->run->kvm_valid_regs)
8665 store_regs(vcpu);
f1d86e46 8666 post_kvm_run_save(vcpu);
20b7035c 8667 kvm_sigset_deactivate(vcpu);
b6c7a5dc 8668
accb757d 8669 vcpu_put(vcpu);
b6c7a5dc
HB
8670 return r;
8671}
8672
01643c51 8673static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
b6c7a5dc 8674{
7ae441ea
GN
8675 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
8676 /*
8677 * We are here if userspace calls get_regs() in the middle of
 8678 * instruction emulation. Register state needs to be copied
4a969980 8679 * back from the emulation context to the vcpu. Userspace shouldn't
7ae441ea
GN
 8680 * usually do that, but some badly designed PV devices (the vmware
 8681 * backdoor interface) need this to work.
8682 */
dd856efa 8683 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
7ae441ea
GN
8684 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
8685 }
de3cd117
SC
8686 regs->rax = kvm_rax_read(vcpu);
8687 regs->rbx = kvm_rbx_read(vcpu);
8688 regs->rcx = kvm_rcx_read(vcpu);
8689 regs->rdx = kvm_rdx_read(vcpu);
8690 regs->rsi = kvm_rsi_read(vcpu);
8691 regs->rdi = kvm_rdi_read(vcpu);
e9c16c78 8692 regs->rsp = kvm_rsp_read(vcpu);
de3cd117 8693 regs->rbp = kvm_rbp_read(vcpu);
b6c7a5dc 8694#ifdef CONFIG_X86_64
de3cd117
SC
8695 regs->r8 = kvm_r8_read(vcpu);
8696 regs->r9 = kvm_r9_read(vcpu);
8697 regs->r10 = kvm_r10_read(vcpu);
8698 regs->r11 = kvm_r11_read(vcpu);
8699 regs->r12 = kvm_r12_read(vcpu);
8700 regs->r13 = kvm_r13_read(vcpu);
8701 regs->r14 = kvm_r14_read(vcpu);
8702 regs->r15 = kvm_r15_read(vcpu);
b6c7a5dc
HB
8703#endif
8704
5fdbf976 8705 regs->rip = kvm_rip_read(vcpu);
91586a3b 8706 regs->rflags = kvm_get_rflags(vcpu);
01643c51 8707}
b6c7a5dc 8708
01643c51
KH
8709int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
8710{
8711 vcpu_load(vcpu);
8712 __get_regs(vcpu, regs);
1fc9b76b 8713 vcpu_put(vcpu);
b6c7a5dc
HB
8714 return 0;
8715}
8716
01643c51 8717static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
b6c7a5dc 8718{
7ae441ea
GN
8719 vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
8720 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
8721
de3cd117
SC
8722 kvm_rax_write(vcpu, regs->rax);
8723 kvm_rbx_write(vcpu, regs->rbx);
8724 kvm_rcx_write(vcpu, regs->rcx);
8725 kvm_rdx_write(vcpu, regs->rdx);
8726 kvm_rsi_write(vcpu, regs->rsi);
8727 kvm_rdi_write(vcpu, regs->rdi);
e9c16c78 8728 kvm_rsp_write(vcpu, regs->rsp);
de3cd117 8729 kvm_rbp_write(vcpu, regs->rbp);
b6c7a5dc 8730#ifdef CONFIG_X86_64
de3cd117
SC
8731 kvm_r8_write(vcpu, regs->r8);
8732 kvm_r9_write(vcpu, regs->r9);
8733 kvm_r10_write(vcpu, regs->r10);
8734 kvm_r11_write(vcpu, regs->r11);
8735 kvm_r12_write(vcpu, regs->r12);
8736 kvm_r13_write(vcpu, regs->r13);
8737 kvm_r14_write(vcpu, regs->r14);
8738 kvm_r15_write(vcpu, regs->r15);
b6c7a5dc
HB
8739#endif
8740
5fdbf976 8741 kvm_rip_write(vcpu, regs->rip);
d73235d1 8742 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
b6c7a5dc 8743
b4f14abd
JK
8744 vcpu->arch.exception.pending = false;
8745
3842d135 8746 kvm_make_request(KVM_REQ_EVENT, vcpu);
01643c51 8747}
3842d135 8748
01643c51
KH
8749int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
8750{
8751 vcpu_load(vcpu);
8752 __set_regs(vcpu, regs);
875656fe 8753 vcpu_put(vcpu);
b6c7a5dc
HB
8754 return 0;
8755}
8756
b6c7a5dc
HB
8757void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
8758{
8759 struct kvm_segment cs;
8760
3e6e0aab 8761 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
b6c7a5dc
HB
8762 *db = cs.db;
8763 *l = cs.l;
8764}
8765EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
8766
01643c51 8767static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
b6c7a5dc 8768{
89a27f4d 8769 struct desc_ptr dt;
b6c7a5dc 8770
3e6e0aab
GT
8771 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
8772 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
8773 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
8774 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
8775 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
8776 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
b6c7a5dc 8777
3e6e0aab
GT
8778 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
8779 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
b6c7a5dc
HB
8780
8781 kvm_x86_ops->get_idt(vcpu, &dt);
89a27f4d
GN
8782 sregs->idt.limit = dt.size;
8783 sregs->idt.base = dt.address;
b6c7a5dc 8784 kvm_x86_ops->get_gdt(vcpu, &dt);
89a27f4d
GN
8785 sregs->gdt.limit = dt.size;
8786 sregs->gdt.base = dt.address;
b6c7a5dc 8787
4d4ec087 8788 sregs->cr0 = kvm_read_cr0(vcpu);
ad312c7c 8789 sregs->cr2 = vcpu->arch.cr2;
9f8fe504 8790 sregs->cr3 = kvm_read_cr3(vcpu);
fc78f519 8791 sregs->cr4 = kvm_read_cr4(vcpu);
2d3ad1f4 8792 sregs->cr8 = kvm_get_cr8(vcpu);
f6801dff 8793 sregs->efer = vcpu->arch.efer;
b6c7a5dc
HB
8794 sregs->apic_base = kvm_get_apic_base(vcpu);
8795
0e96f31e 8796 memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));
b6c7a5dc 8797
04140b41 8798 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
14d0bc1f
GN
8799 set_bit(vcpu->arch.interrupt.nr,
8800 (unsigned long *)sregs->interrupt_bitmap);
01643c51 8801}
16d7a191 8802
01643c51
KH
8803int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
8804 struct kvm_sregs *sregs)
8805{
8806 vcpu_load(vcpu);
8807 __get_sregs(vcpu, sregs);
bcdec41c 8808 vcpu_put(vcpu);
b6c7a5dc
HB
8809 return 0;
8810}
8811
62d9f0db
MT
8812int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
8813 struct kvm_mp_state *mp_state)
8814{
fd232561 8815 vcpu_load(vcpu);
f958bd23
SC
8816 if (kvm_mpx_supported())
8817 kvm_load_guest_fpu(vcpu);
fd232561 8818
66450a21 8819 kvm_apic_accept_events(vcpu);
6aef266c
SV
8820 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
8821 vcpu->arch.pv.pv_unhalted)
8822 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
8823 else
8824 mp_state->mp_state = vcpu->arch.mp_state;
8825
f958bd23
SC
8826 if (kvm_mpx_supported())
8827 kvm_put_guest_fpu(vcpu);
fd232561 8828 vcpu_put(vcpu);
62d9f0db
MT
8829 return 0;
8830}
8831
8832int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
8833 struct kvm_mp_state *mp_state)
8834{
e83dff5e
CD
8835 int ret = -EINVAL;
8836
8837 vcpu_load(vcpu);
8838
bce87cce 8839 if (!lapic_in_kernel(vcpu) &&
66450a21 8840 mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
e83dff5e 8841 goto out;
66450a21 8842
27cbe7d6
LA
8843 /*
8844 * KVM_MP_STATE_INIT_RECEIVED means the processor is in
8845 * INIT state; latched init should be reported using
8846 * KVM_SET_VCPU_EVENTS, so reject it here.
8847 */
8848 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) &&
28bf2888
DH
8849 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
8850 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
e83dff5e 8851 goto out;
28bf2888 8852
66450a21
JK
8853 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
8854 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
8855 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
8856 } else
8857 vcpu->arch.mp_state = mp_state->mp_state;
3842d135 8858 kvm_make_request(KVM_REQ_EVENT, vcpu);
e83dff5e
CD
8859
8860 ret = 0;
8861out:
8862 vcpu_put(vcpu);
8863 return ret;
62d9f0db
MT
8864}
8865
7f3d35fd
KW
8866int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
8867 int reason, bool has_error_code, u32 error_code)
b6c7a5dc 8868{
9d74191a 8869 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
8ec4722d 8870 int ret;
e01c2426 8871
8ec4722d 8872 init_emulate_ctxt(vcpu);
c697518a 8873
7f3d35fd 8874 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
9d74191a 8875 has_error_code, error_code);
1051778f
SC
8876 if (ret) {
8877 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
8878 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
8879 vcpu->run->internal.ndata = 0;
60fc3d02 8880 return 0;
1051778f 8881 }
37817f29 8882
9d74191a
TY
8883 kvm_rip_write(vcpu, ctxt->eip);
8884 kvm_set_rflags(vcpu, ctxt->eflags);
3842d135 8885 kvm_make_request(KVM_REQ_EVENT, vcpu);
60fc3d02 8886 return 1;
37817f29
IE
8887}
8888EXPORT_SYMBOL_GPL(kvm_task_switch);
8889
3140c156 8890static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
f2981033 8891{
37b95951 8892 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
f2981033
LT
8893 /*
8894 * When EFER.LME and CR0.PG are set, the processor is in
8895 * 64-bit mode (though maybe in a 32-bit code segment).
8896 * CR4.PAE and EFER.LMA must be set.
8897 */
37b95951 8898 if (!(sregs->cr4 & X86_CR4_PAE)
f2981033
LT
8899 || !(sregs->efer & EFER_LMA))
8900 return -EINVAL;
8901 } else {
8902 /*
8903 * Not in 64-bit mode: EFER.LMA is clear and the code
8904 * segment cannot be 64-bit.
8905 */
8906 if (sregs->efer & EFER_LMA || sregs->cs.l)
8907 return -EINVAL;
8908 }
8909
3ca94192 8910 return kvm_valid_cr4(vcpu, sregs->cr4);
f2981033
LT
8911}
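
As a concrete illustration of the long-mode rule above, a hedged userspace sketch of a self-consistent 64-bit KVM_SET_SREGS configuration; the EFER/CR bit masks are open-coded here as an assumption (real callers would take them from the architectural headers):

/* Illustrative only: sregs bits that satisfy kvm_valid_sregs(). */
#include <linux/kvm.h>

#define X86_CR0_PG	(1UL << 31)
#define X86_CR4_PAE	(1UL << 5)
#define EFER_LME	(1UL << 8)
#define EFER_LMA	(1UL << 10)

static void make_long_mode(struct kvm_sregs *sregs)
{
	sregs->efer |= EFER_LME | EFER_LMA;	/* LMA must match LME + PG */
	sregs->cr0  |= X86_CR0_PG;		/* paging enabled */
	sregs->cr4  |= X86_CR4_PAE;		/* PAE is mandatory in long mode */
	sregs->cs.l  = 1;			/* 64-bit code segment */
	sregs->cs.db = 0;			/* L=1 requires D/B=0 */
}
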
8912
01643c51 8913static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
b6c7a5dc 8914{
58cb628d 8915 struct msr_data apic_base_msr;
b6c7a5dc 8916 int mmu_reset_needed = 0;
c4d21882 8917 int cpuid_update_needed = 0;
63f42e02 8918 int pending_vec, max_bits, idx;
89a27f4d 8919 struct desc_ptr dt;
b4ef9d4e
CD
8920 int ret = -EINVAL;
8921
f2981033 8922 if (kvm_valid_sregs(vcpu, sregs))
8dbfb2bf 8923 goto out;
f2981033 8924
d3802286
JM
8925 apic_base_msr.data = sregs->apic_base;
8926 apic_base_msr.host_initiated = true;
8927 if (kvm_set_apic_base(vcpu, &apic_base_msr))
b4ef9d4e 8928 goto out;
6d1068b3 8929
89a27f4d
GN
8930 dt.size = sregs->idt.limit;
8931 dt.address = sregs->idt.base;
b6c7a5dc 8932 kvm_x86_ops->set_idt(vcpu, &dt);
89a27f4d
GN
8933 dt.size = sregs->gdt.limit;
8934 dt.address = sregs->gdt.base;
b6c7a5dc
HB
8935 kvm_x86_ops->set_gdt(vcpu, &dt);
8936
ad312c7c 8937 vcpu->arch.cr2 = sregs->cr2;
9f8fe504 8938 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
dc7e795e 8939 vcpu->arch.cr3 = sregs->cr3;
cb3c1e2f 8940 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
b6c7a5dc 8941
2d3ad1f4 8942 kvm_set_cr8(vcpu, sregs->cr8);
b6c7a5dc 8943
f6801dff 8944 mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
b6c7a5dc 8945 kvm_x86_ops->set_efer(vcpu, sregs->efer);
b6c7a5dc 8946
4d4ec087 8947 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
b6c7a5dc 8948 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
d7306163 8949 vcpu->arch.cr0 = sregs->cr0;
b6c7a5dc 8950
fc78f519 8951 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
c4d21882
WH
8952 cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
8953 (X86_CR4_OSXSAVE | X86_CR4_PKE));
b6c7a5dc 8954 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
c4d21882 8955 if (cpuid_update_needed)
00b27a3e 8956 kvm_update_cpuid(vcpu);
63f42e02
XG
8957
8958 idx = srcu_read_lock(&vcpu->kvm->srcu);
bf03d4f9 8959 if (is_pae_paging(vcpu)) {
9f8fe504 8960 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
7c93be44
MT
8961 mmu_reset_needed = 1;
8962 }
63f42e02 8963 srcu_read_unlock(&vcpu->kvm->srcu, idx);
b6c7a5dc
HB
8964
8965 if (mmu_reset_needed)
8966 kvm_mmu_reset_context(vcpu);
8967
a50abc3b 8968 max_bits = KVM_NR_INTERRUPTS;
923c61bb
GN
8969 pending_vec = find_first_bit(
8970 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
8971 if (pending_vec < max_bits) {
66fd3f7f 8972 kvm_queue_interrupt(vcpu, pending_vec, false);
923c61bb 8973 pr_debug("Set back pending irq %d\n", pending_vec);
b6c7a5dc
HB
8974 }
8975
3e6e0aab
GT
8976 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
8977 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
8978 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
8979 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
8980 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
8981 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
b6c7a5dc 8982
3e6e0aab
GT
8983 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
8984 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
b6c7a5dc 8985
5f0269f5
ME
8986 update_cr8_intercept(vcpu);
8987
9c3e4aab 8988 /* Older userspace won't unhalt the vcpu on reset. */
c5af89b6 8989 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
9c3e4aab 8990 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
3eeb3288 8991 !is_protmode(vcpu))
9c3e4aab
MT
8992 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
8993
3842d135
AK
8994 kvm_make_request(KVM_REQ_EVENT, vcpu);
8995
b4ef9d4e
CD
8996 ret = 0;
8997out:
01643c51
KH
8998 return ret;
8999}
9000
9001int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
9002 struct kvm_sregs *sregs)
9003{
9004 int ret;
9005
9006 vcpu_load(vcpu);
9007 ret = __set_sregs(vcpu, sregs);
b4ef9d4e
CD
9008 vcpu_put(vcpu);
9009 return ret;
b6c7a5dc
HB
9010}
9011
d0bfb940
JK
9012int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
9013 struct kvm_guest_debug *dbg)
b6c7a5dc 9014{
355be0b9 9015 unsigned long rflags;
ae675ef0 9016 int i, r;
b6c7a5dc 9017
66b56562
CD
9018 vcpu_load(vcpu);
9019
4f926bf2
JK
9020 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
9021 r = -EBUSY;
9022 if (vcpu->arch.exception.pending)
2122ff5e 9023 goto out;
4f926bf2
JK
9024 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
9025 kvm_queue_exception(vcpu, DB_VECTOR);
9026 else
9027 kvm_queue_exception(vcpu, BP_VECTOR);
9028 }
9029
91586a3b
JK
9030 /*
9031 * Read rflags as long as potentially injected trace flags are still
9032 * filtered out.
9033 */
9034 rflags = kvm_get_rflags(vcpu);
355be0b9
JK
9035
9036 vcpu->guest_debug = dbg->control;
9037 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
9038 vcpu->guest_debug = 0;
9039
9040 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
ae675ef0
JK
9041 for (i = 0; i < KVM_NR_DB_REGS; ++i)
9042 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
c8639010 9043 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
ae675ef0
JK
9044 } else {
9045 for (i = 0; i < KVM_NR_DB_REGS; i++)
9046 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
ae675ef0 9047 }
c8639010 9048 kvm_update_dr7(vcpu);
ae675ef0 9049
f92653ee
JK
9050 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
9051 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
9052 get_segment_base(vcpu, VCPU_SREG_CS);
94fe45da 9053
91586a3b
JK
9054 /*
9055 * Trigger an rflags update that will inject or remove the trace
9056 * flags.
9057 */
9058 kvm_set_rflags(vcpu, rflags);
b6c7a5dc 9059
a96036b8 9060 kvm_x86_ops->update_bp_intercept(vcpu);
b6c7a5dc 9061
4f926bf2 9062 r = 0;
d0bfb940 9063
2122ff5e 9064out:
66b56562 9065 vcpu_put(vcpu);
b6c7a5dc
HB
9066 return r;
9067}
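
From userspace this path is driven by the KVM_SET_GUEST_DEBUG vcpu ioctl; a minimal sketch enabling single-step (vcpu_fd assumed to be an open vcpu file descriptor):

/* Illustrative userspace sketch: single-step via KVM_SET_GUEST_DEBUG. */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	/* Subsequent KVM_RUNs now exit with KVM_EXIT_DEBUG per instruction. */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
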
9068
8b006791
ZX
9069/*
9070 * Translate a guest virtual address to a guest physical address.
9071 */
9072int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
9073 struct kvm_translation *tr)
9074{
9075 unsigned long vaddr = tr->linear_address;
9076 gpa_t gpa;
f656ce01 9077 int idx;
8b006791 9078
1da5b61d
CD
9079 vcpu_load(vcpu);
9080
f656ce01 9081 idx = srcu_read_lock(&vcpu->kvm->srcu);
1871c602 9082 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
f656ce01 9083 srcu_read_unlock(&vcpu->kvm->srcu, idx);
8b006791
ZX
9084 tr->physical_address = gpa;
9085 tr->valid = gpa != UNMAPPED_GVA;
9086 tr->writeable = 1;
9087 tr->usermode = 0;
8b006791 9088
1da5b61d 9089 vcpu_put(vcpu);
8b006791
ZX
9090 return 0;
9091}
9092
d0752060
HB
9093int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
9094{
1393123e 9095 struct fxregs_state *fxsave;
d0752060 9096
1393123e 9097 vcpu_load(vcpu);
d0752060 9098
b666a4b6 9099 fxsave = &vcpu->arch.guest_fpu->state.fxsave;
d0752060
HB
9100 memcpy(fpu->fpr, fxsave->st_space, 128);
9101 fpu->fcw = fxsave->cwd;
9102 fpu->fsw = fxsave->swd;
9103 fpu->ftwx = fxsave->twd;
9104 fpu->last_opcode = fxsave->fop;
9105 fpu->last_ip = fxsave->rip;
9106 fpu->last_dp = fxsave->rdp;
0e96f31e 9107 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));
d0752060 9108
1393123e 9109 vcpu_put(vcpu);
d0752060
HB
9110 return 0;
9111}
9112
9113int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
9114{
6a96bc7f
CD
9115 struct fxregs_state *fxsave;
9116
9117 vcpu_load(vcpu);
9118
b666a4b6 9119 fxsave = &vcpu->arch.guest_fpu->state.fxsave;
d0752060 9120
d0752060
HB
9121 memcpy(fxsave->st_space, fpu->fpr, 128);
9122 fxsave->cwd = fpu->fcw;
9123 fxsave->swd = fpu->fsw;
9124 fxsave->twd = fpu->ftwx;
9125 fxsave->fop = fpu->last_opcode;
9126 fxsave->rip = fpu->last_ip;
9127 fxsave->rdp = fpu->last_dp;
0e96f31e 9128 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));
d0752060 9129
6a96bc7f 9130 vcpu_put(vcpu);
d0752060
HB
9131 return 0;
9132}
9133
01643c51
KH
9134static void store_regs(struct kvm_vcpu *vcpu)
9135{
9136 BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);
9137
9138 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
9139 __get_regs(vcpu, &vcpu->run->s.regs.regs);
9140
9141 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
9142 __get_sregs(vcpu, &vcpu->run->s.regs.sregs);
9143
9144 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
9145 kvm_vcpu_ioctl_x86_get_vcpu_events(
9146 vcpu, &vcpu->run->s.regs.events);
9147}
9148
9149static int sync_regs(struct kvm_vcpu *vcpu)
9150{
9151 if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)
9152 return -EINVAL;
9153
9154 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
9155 __set_regs(vcpu, &vcpu->run->s.regs.regs);
9156 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
9157 }
9158 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
9159 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
9160 return -EINVAL;
9161 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
9162 }
9163 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
9164 if (kvm_vcpu_ioctl_x86_set_vcpu_events(
9165 vcpu, &vcpu->run->s.regs.events))
9166 return -EINVAL;
9167 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
9168 }
9169
9170 return 0;
9171}
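
store_regs() and sync_regs() are the kernel side of KVM_CAP_SYNC_REGS, which lets userspace exchange register state through the mmap()ed kvm_run area instead of separate GET/SET ioctls. A minimal sketch of the userspace side, under the usual vcpu_fd/run assumptions:

/* Illustrative userspace sketch of KVM_CAP_SYNC_REGS. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void bump_rax(int vcpu_fd, struct kvm_run *run)
{
	/* Ask KVM to fill run->s.regs on the next exit... */
	run->kvm_valid_regs = KVM_SYNC_X86_REGS;
	ioctl(vcpu_fd, KVM_RUN, 0);

	/* ...mutate in place and mark it dirty for the next entry. */
	run->s.regs.regs.rax += 1;
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;
	ioctl(vcpu_fd, KVM_RUN, 0);
}
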
9172
0ee6a517 9173static void fx_init(struct kvm_vcpu *vcpu)
d0752060 9174{
b666a4b6 9175 fpstate_init(&vcpu->arch.guest_fpu->state);
782511b0 9176 if (boot_cpu_has(X86_FEATURE_XSAVES))
b666a4b6 9177 vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv =
df1daba7 9178 host_xcr0 | XSTATE_COMPACTION_ENABLED;
d0752060 9179
2acf923e
DC
9180 /*
9181 * Ensure guest xcr0 is valid for loading
9182 */
d91cab78 9183 vcpu->arch.xcr0 = XFEATURE_MASK_FP;
2acf923e 9184
ad312c7c 9185 vcpu->arch.cr0 |= X86_CR0_ET;
d0752060 9186}
d0752060 9187
897cc38e
SC
9188int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
9189{
9190 if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
9191 pr_warn_once("kvm: SMP vm created on host with unstable TSC; "
9192 "guest TSC will not be reliable\n");
9193
9194 return 0;
9195}
9196
e529ef66 9197int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
e9b11c17 9198{
95a0d01e
SC
9199 struct page *page;
9200 int r;
5f73db11 9201
95a0d01e
SC
9202 vcpu->arch.emulate_ctxt.ops = &emulate_ops;
9203 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
9204 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
9205 else
9206 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
9207
9208 kvm_set_tsc_khz(vcpu, max_tsc_khz);
9209
9210 r = kvm_mmu_create(vcpu);
9211 if (r < 0)
9212 return r;
9213
9214 if (irqchip_in_kernel(vcpu->kvm)) {
9215 vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm);
9216 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
9217 if (r < 0)
9218 goto fail_mmu_destroy;
9219 } else
9220 static_key_slow_inc(&kvm_no_apic_vcpu);
9221
9222 r = -ENOMEM;
9223
9224 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
9225 if (!page)
9226 goto fail_free_lapic;
9227 vcpu->arch.pio_data = page_address(page);
9228
9229 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
9230 GFP_KERNEL_ACCOUNT);
9231 if (!vcpu->arch.mce_banks)
9232 goto fail_free_pio_data;
9233 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
9234
9235 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
9236 GFP_KERNEL_ACCOUNT))
9237 goto fail_free_mce_banks;
9238
9239 vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
9240 GFP_KERNEL_ACCOUNT);
9241 if (!vcpu->arch.user_fpu) {
9242 pr_err("kvm: failed to allocate userspace's fpu\n");
9243 goto free_wbinvd_dirty_mask;
9244 }
9245
9246 vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
9247 GFP_KERNEL_ACCOUNT);
9248 if (!vcpu->arch.guest_fpu) {
9249 pr_err("kvm: failed to allocate vcpu's fpu\n");
9250 goto free_user_fpu;
9251 }
9252 fx_init(vcpu);
9253
9254 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
9255
9256 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
9257
9258 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
9259
9260 kvm_async_pf_hash_reset(vcpu);
9261 kvm_pmu_init(vcpu);
9262
9263 vcpu->arch.pending_external_vector = -1;
9264 vcpu->arch.preempted_in_kernel = false;
9265
9266 kvm_hv_vcpu_init(vcpu);
9267
9268 r = kvm_x86_ops->vcpu_create(vcpu);
9269 if (r)
9270 goto free_guest_fpu;
e9b11c17 9271
0cf9135b 9272 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
e53d88af 9273 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
19efffa2 9274 kvm_vcpu_mtrr_init(vcpu);
ec7660cc 9275 vcpu_load(vcpu);
d28bc9dd 9276 kvm_vcpu_reset(vcpu, false);
e1732991 9277 kvm_init_mmu(vcpu, false);
e9b11c17 9278 vcpu_put(vcpu);
ec7660cc 9279 return 0;
95a0d01e
SC
9280
9281free_guest_fpu:
9282 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
9283free_user_fpu:
9284 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
9285free_wbinvd_dirty_mask:
9286 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
9287fail_free_mce_banks:
9288 kfree(vcpu->arch.mce_banks);
9289fail_free_pio_data:
9290 free_page((unsigned long)vcpu->arch.pio_data);
9291fail_free_lapic:
9292 kvm_free_lapic(vcpu);
9293fail_mmu_destroy:
9294 kvm_mmu_destroy(vcpu);
9295 return r;
e9b11c17
ZX
9296}
9297
31928aa5 9298void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 9299{
8fe8ab46 9300 struct msr_data msr;
332967a3 9301 struct kvm *kvm = vcpu->kvm;
42897d86 9302
d3457c87
RK
9303 kvm_hv_vcpu_postcreate(vcpu);
9304
ec7660cc 9305 if (mutex_lock_killable(&vcpu->mutex))
31928aa5 9306 return;
ec7660cc 9307 vcpu_load(vcpu);
8fe8ab46
WA
9308 msr.data = 0x0;
9309 msr.index = MSR_IA32_TSC;
9310 msr.host_initiated = true;
9311 kvm_write_tsc(vcpu, &msr);
42897d86 9312 vcpu_put(vcpu);
2d5ba19b
MT
9313
9314 /* poll control enabled by default */
9315 vcpu->arch.msr_kvm_poll_control = 1;
9316
ec7660cc 9317 mutex_unlock(&vcpu->mutex);
42897d86 9318
630994b3
MT
9319 if (!kvmclock_periodic_sync)
9320 return;
9321
332967a3
AJ
9322 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
9323 KVMCLOCK_SYNC_PERIOD);
42897d86
MT
9324}
9325
d40ccc62 9326void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
e9b11c17 9327{
95a0d01e
SC
9328 int idx;
9329
50b143e1
SC
9330 kvmclock_reset(vcpu);
9331
9332 kvm_x86_ops->vcpu_free(vcpu);
9333
50b143e1
SC
9334 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
9335 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
9336 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
95a0d01e
SC
9337
9338 kvm_hv_vcpu_uninit(vcpu);
9339 kvm_pmu_destroy(vcpu);
9340 kfree(vcpu->arch.mce_banks);
9341 kvm_free_lapic(vcpu);
9342 idx = srcu_read_lock(&vcpu->kvm->srcu);
9343 kvm_mmu_destroy(vcpu);
9344 srcu_read_unlock(&vcpu->kvm->srcu, idx);
9345 free_page((unsigned long)vcpu->arch.pio_data);
9346 if (!lapic_in_kernel(vcpu))
9347 static_key_slow_dec(&kvm_no_apic_vcpu);
e9b11c17
ZX
9348}
9349
d28bc9dd 9350void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
e9b11c17 9351{
b7e31be3
RK
9352 kvm_lapic_reset(vcpu, init_event);
9353
e69fab5d
PB
9354 vcpu->arch.hflags = 0;
9355
c43203ca 9356 vcpu->arch.smi_pending = 0;
52797bf9 9357 vcpu->arch.smi_count = 0;
7460fb4a
AK
9358 atomic_set(&vcpu->arch.nmi_queued, 0);
9359 vcpu->arch.nmi_pending = 0;
448fa4a9 9360 vcpu->arch.nmi_injected = false;
5f7552d4
NA
9361 kvm_clear_interrupt_queue(vcpu);
9362 kvm_clear_exception_queue(vcpu);
448fa4a9 9363
42dbaa5a 9364 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
ae561ede 9365 kvm_update_dr0123(vcpu);
6f43ed01 9366 vcpu->arch.dr6 = DR6_INIT;
73aaf249 9367 kvm_update_dr6(vcpu);
42dbaa5a 9368 vcpu->arch.dr7 = DR7_FIXED_1;
c8639010 9369 kvm_update_dr7(vcpu);
42dbaa5a 9370
1119022c
NA
9371 vcpu->arch.cr2 = 0;
9372
3842d135 9373 kvm_make_request(KVM_REQ_EVENT, vcpu);
344d9588 9374 vcpu->arch.apf.msr_val = 0;
c9aaa895 9375 vcpu->arch.st.msr_val = 0;
3842d135 9376
12f9a48f
GC
9377 kvmclock_reset(vcpu);
9378
af585b92
GN
9379 kvm_clear_async_pf_completion_queue(vcpu);
9380 kvm_async_pf_hash_reset(vcpu);
9381 vcpu->arch.apf.halted = false;
3842d135 9382
a554d207
WL
9383 if (kvm_mpx_supported()) {
9384 void *mpx_state_buffer;
9385
9386 /*
 9387 * Avoid having the INIT path from kvm_apic_has_events() run with
 9388 * a loaded FPU, as that would not let userspace fix the state.
9389 */
f775b13e
RR
9390 if (init_event)
9391 kvm_put_guest_fpu(vcpu);
b666a4b6 9392 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
abd16d68 9393 XFEATURE_BNDREGS);
a554d207
WL
9394 if (mpx_state_buffer)
9395 memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
b666a4b6 9396 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
abd16d68 9397 XFEATURE_BNDCSR);
a554d207
WL
9398 if (mpx_state_buffer)
9399 memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
f775b13e
RR
9400 if (init_event)
9401 kvm_load_guest_fpu(vcpu);
a554d207
WL
9402 }
9403
64d60670 9404 if (!init_event) {
d28bc9dd 9405 kvm_pmu_reset(vcpu);
64d60670 9406 vcpu->arch.smbase = 0x30000;
db2336a8 9407
db2336a8 9408 vcpu->arch.msr_misc_features_enables = 0;
a554d207
WL
9409
9410 vcpu->arch.xcr0 = XFEATURE_MASK_FP;
64d60670 9411 }
f5132b01 9412
66f7b72e
JS
9413 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
9414 vcpu->arch.regs_avail = ~0;
9415 vcpu->arch.regs_dirty = ~0;
9416
a554d207
WL
9417 vcpu->arch.ia32_xss = 0;
9418
d28bc9dd 9419 kvm_x86_ops->vcpu_reset(vcpu, init_event);
e9b11c17
ZX
9420}
9421
2b4a273b 9422void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
66450a21
JK
9423{
9424 struct kvm_segment cs;
9425
9426 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
9427 cs.selector = vector << 8;
9428 cs.base = vector << 12;
9429 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
9430 kvm_rip_write(vcpu, 0);
e9b11c17
ZX
9431}
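
A worked example of the vector-to-address mapping above, as a standalone check (illustrative, not kernel code): SIPI vector 0x10 yields CS.selector 0x1000 and CS.base 0x10000, so with RIP = 0 the AP starts executing at physical 0x10000.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t vector = 0x10;			/* from the INIT-SIPI-SIPI sequence */
	uint16_t cs_selector = vector << 8;	/* 0x1000 */
	uint32_t cs_base = (uint32_t)vector << 12;	/* 0x10000 */

	/* Real-mode consistency: the base is selector << 4. */
	assert(cs_base == (uint32_t)cs_selector << 4);
	return 0;
}
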
9432
13a34e06 9433int kvm_arch_hardware_enable(void)
e9b11c17 9434{
ca84d1a2
ZA
9435 struct kvm *kvm;
9436 struct kvm_vcpu *vcpu;
9437 int i;
0dd6a6ed
ZA
9438 int ret;
9439 u64 local_tsc;
9440 u64 max_tsc = 0;
9441 bool stable, backwards_tsc = false;
18863bdd
AK
9442
9443 kvm_shared_msr_cpu_online();
13a34e06 9444 ret = kvm_x86_ops->hardware_enable();
0dd6a6ed
ZA
9445 if (ret != 0)
9446 return ret;
9447
4ea1636b 9448 local_tsc = rdtsc();
b0c39dc6 9449 stable = !kvm_check_tsc_unstable();
0dd6a6ed
ZA
9450 list_for_each_entry(kvm, &vm_list, vm_list) {
9451 kvm_for_each_vcpu(i, vcpu, kvm) {
9452 if (!stable && vcpu->cpu == smp_processor_id())
105b21bb 9453 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
0dd6a6ed
ZA
9454 if (stable && vcpu->arch.last_host_tsc > local_tsc) {
9455 backwards_tsc = true;
9456 if (vcpu->arch.last_host_tsc > max_tsc)
9457 max_tsc = vcpu->arch.last_host_tsc;
9458 }
9459 }
9460 }
9461
9462 /*
9463 * Sometimes, even reliable TSCs go backwards. This happens on
9464 * platforms that reset TSC during suspend or hibernate actions, but
9465 * maintain synchronization. We must compensate. Fortunately, we can
9466 * detect that condition here, which happens early in CPU bringup,
9467 * before any KVM threads can be running. Unfortunately, we can't
9468 * bring the TSCs fully up to date with real time, as we aren't yet far
9469 * enough into CPU bringup that we know how much real time has actually
9285ec4c 9470 * elapsed; our helper function, ktime_get_boottime_ns(), will be using boot
0dd6a6ed
ZA
9471 * variables that haven't been updated yet.
9472 *
9473 * So we simply find the maximum observed TSC above, then record the
9474 * adjustment to TSC in each VCPU. When the VCPU later gets loaded,
9475 * the adjustment will be applied. Note that we accumulate
9476 * adjustments, in case multiple suspend cycles happen before some VCPU
9477 * gets a chance to run again. In the event that no KVM threads get a
9478 * chance to run, we will miss the entire elapsed period, as we'll have
9479 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
 9480 * lose cycle time. This isn't too big a deal, since the loss will be
9481 * uniform across all VCPUs (not to mention the scenario is extremely
9482 * unlikely). It is possible that a second hibernate recovery happens
9483 * much faster than a first, causing the observed TSC here to be
9484 * smaller; this would require additional padding adjustment, which is
9485 * why we set last_host_tsc to the local tsc observed here.
9486 *
9487 * N.B. - this code below runs only on platforms with reliable TSC,
9488 * as that is the only way backwards_tsc is set above. Also note
9489 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
9490 * have the same delta_cyc adjustment applied if backwards_tsc
9491 * is detected. Note further, this adjustment is only done once,
9492 * as we reset last_host_tsc on all VCPUs to stop this from being
9493 * called multiple times (one for each physical CPU bringup).
9494 *
4a969980 9495 * Platforms with unreliable TSCs don't have to deal with this, they
0dd6a6ed
ZA
9496 * will be compensated by the logic in vcpu_load, which sets the TSC to
9497 * catchup mode. This will catchup all VCPUs to real time, but cannot
9498 * guarantee that they stay in perfect synchronization.
9499 */
9500 if (backwards_tsc) {
9501 u64 delta_cyc = max_tsc - local_tsc;
9502 list_for_each_entry(kvm, &vm_list, vm_list) {
a826faf1 9503 kvm->arch.backwards_tsc_observed = true;
0dd6a6ed
ZA
9504 kvm_for_each_vcpu(i, vcpu, kvm) {
9505 vcpu->arch.tsc_offset_adjustment += delta_cyc;
9506 vcpu->arch.last_host_tsc = local_tsc;
105b21bb 9507 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
0dd6a6ed
ZA
9508 }
9509
9510 /*
9511 * We have to disable TSC offset matching.. if you were
9512 * booting a VM while issuing an S4 host suspend....
9513 * you may have some problem. Solving this issue is
9514 * left as an exercise to the reader.
9515 */
9516 kvm->arch.last_tsc_nsec = 0;
9517 kvm->arch.last_tsc_write = 0;
9518 }
9519
9520 }
9521 return 0;
e9b11c17
ZX
9522}
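
To make the compensation above concrete, a toy model of how tsc_offset_adjustment accumulates across two suspend/resume cycles (all numbers invented for illustration):

/* Illustrative arithmetic only; mirrors the delta_cyc accumulation. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_tsc = 1000000;	/* highest last_host_tsc seen pre-suspend */
	uint64_t adjustment = 0;

	/* First resume: host TSC restarted below the observed maximum. */
	uint64_t local_tsc = 200000;
	adjustment += max_tsc - local_tsc;	/* +800000 */
	max_tsc = local_tsc;			/* last_host_tsc reset */

	/* Second resume before any vcpu ran: accumulate again. */
	local_tsc = 50000;
	adjustment += max_tsc - local_tsc;	/* +150000 */

	assert(adjustment == 950000);
	return 0;
}
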
9523
13a34e06 9524void kvm_arch_hardware_disable(void)
e9b11c17 9525{
13a34e06
RK
9526 kvm_x86_ops->hardware_disable();
9527 drop_user_return_notifiers();
e9b11c17
ZX
9528}
9529
9530int kvm_arch_hardware_setup(void)
9531{
9e9c3fe4
NA
9532 int r;
9533
9534 r = kvm_x86_ops->hardware_setup();
9535 if (r != 0)
9536 return r;
9537
b11306b5
SC
9538 cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data);
9539
35181e86
HZ
9540 if (kvm_has_tsc_control) {
9541 /*
9542 * Make sure the user can only configure tsc_khz values that
9543 * fit into a signed integer.
273ba457 9544 * A min value is not calculated because it will always
35181e86
HZ
9545 * be 1 on all machines.
9546 */
9547 u64 max = min(0x7fffffffULL,
9548 __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
9549 kvm_max_guest_tsc_khz = max;
9550
ad721883 9551 kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
35181e86 9552 }
ad721883 9553
139a12cf
AL
9554 if (boot_cpu_has(X86_FEATURE_XSAVES))
9555 rdmsrl(MSR_IA32_XSS, host_xss);
9556
9e9c3fe4
NA
9557 kvm_init_msr_list();
9558 return 0;
e9b11c17
ZX
9559}
9560
9561void kvm_arch_hardware_unsetup(void)
9562{
9563 kvm_x86_ops->hardware_unsetup();
9564}
9565
f257d6dc 9566int kvm_arch_check_processor_compat(void)
e9b11c17 9567{
f1cdecf5
SC
9568 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
9569
9570 WARN_ON(!irqs_disabled());
9571
9572 if (kvm_host_cr4_reserved_bits(c) != cr4_reserved_bits)
9573 return -EIO;
9574
f257d6dc 9575 return kvm_x86_ops->check_processor_compatibility();
d71ba788
PB
9576}
9577
9578bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
9579{
9580 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
9581}
9582EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
9583
9584bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
9585{
9586 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
e9b11c17
ZX
9587}
9588
54e9818f 9589struct static_key kvm_no_apic_vcpu __read_mostly;
bce87cce 9590EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
54e9818f 9591
e790d9ef
RK
9592void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
9593{
b35e5548
LX
9594 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
9595
c595ceee 9596 vcpu->arch.l1tf_flush_l1d = true;
b35e5548
LX
9597 if (pmu->version && unlikely(pmu->event_count)) {
9598 pmu->need_cleanup = true;
9599 kvm_make_request(KVM_REQ_PMU, vcpu);
9600 }
ae97a3b8 9601 kvm_x86_ops->sched_in(vcpu, cpu);
e790d9ef
RK
9602}
9603
e08b9637 9604int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
d19a9cd2 9605{
e08b9637
CO
9606 if (type)
9607 return -EINVAL;
9608
6ef768fa 9609 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
f05e70ac 9610 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
10605204 9611 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
1aa9b957 9612 INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
4d5c5d0f 9613 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
e0f0bbc5 9614 atomic_set(&kvm->arch.noncoherent_dma_count, 0);
d19a9cd2 9615
5550af4d
SY
9616 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
9617 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
7a84428a
AW
9618 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
9619 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
9620 &kvm->arch.irq_sources_bitmap);
5550af4d 9621
038f8c11 9622 raw_spin_lock_init(&kvm->arch.tsc_write_lock);
1e08ec4a 9623 mutex_init(&kvm->arch.apic_map_lock);
d828199e
MT
9624 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
9625
9285ec4c 9626 kvm->arch.kvmclock_offset = -ktime_get_boottime_ns();
d828199e 9627 pvclock_update_vm_gtod_copy(kvm);
53f658b3 9628
6fbbde9a
DS
9629 kvm->arch.guest_can_read_msr_platform_info = true;
9630
7e44e449 9631 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
332967a3 9632 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
7e44e449 9633
cbc0236a 9634 kvm_hv_init_vm(kvm);
0eb05bf2 9635 kvm_page_track_init(kvm);
13d268ca 9636 kvm_mmu_init_vm(kvm);
0eb05bf2 9637
92735b1b 9638 return kvm_x86_ops->vm_init(kvm);
d19a9cd2
ZX
9639}
9640
1aa9b957
JS
9641int kvm_arch_post_init_vm(struct kvm *kvm)
9642{
9643 return kvm_mmu_post_init_vm(kvm);
9644}
9645
d19a9cd2
ZX
9646static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
9647{
ec7660cc 9648 vcpu_load(vcpu);
d19a9cd2
ZX
9649 kvm_mmu_unload(vcpu);
9650 vcpu_put(vcpu);
9651}
9652
9653static void kvm_free_vcpus(struct kvm *kvm)
9654{
9655 unsigned int i;
988a2cae 9656 struct kvm_vcpu *vcpu;
d19a9cd2
ZX
9657
9658 /*
9659 * Unpin any mmu pages first.
9660 */
af585b92
GN
9661 kvm_for_each_vcpu(i, vcpu, kvm) {
9662 kvm_clear_async_pf_completion_queue(vcpu);
988a2cae 9663 kvm_unload_vcpu_mmu(vcpu);
af585b92 9664 }
988a2cae 9665 kvm_for_each_vcpu(i, vcpu, kvm)
4543bdc0 9666 kvm_vcpu_destroy(vcpu);
988a2cae
GN
9667
9668 mutex_lock(&kvm->lock);
9669 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
9670 kvm->vcpus[i] = NULL;
d19a9cd2 9671
988a2cae
GN
9672 atomic_set(&kvm->online_vcpus, 0);
9673 mutex_unlock(&kvm->lock);
d19a9cd2
ZX
9674}
9675
ad8ba2cd
SY
9676void kvm_arch_sync_events(struct kvm *kvm)
9677{
332967a3 9678 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
7e44e449 9679 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
aea924f6 9680 kvm_free_pit(kvm);
ad8ba2cd
SY
9681}
9682
1d8007bd 9683int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
9da0e4d5
PB
9684{
9685 int i, r;
25188b99 9686 unsigned long hva;
f0d648bd
PB
9687 struct kvm_memslots *slots = kvm_memslots(kvm);
9688 struct kvm_memory_slot *slot, old;
9da0e4d5
PB
9689
9690 /* Called with kvm->slots_lock held. */
1d8007bd
PB
9691 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
9692 return -EINVAL;
9da0e4d5 9693
f0d648bd
PB
9694 slot = id_to_memslot(slots, id);
9695 if (size) {
b21629da 9696 if (slot->npages)
f0d648bd
PB
9697 return -EEXIST;
9698
9699 /*
9700 * MAP_SHARED to prevent internal slot pages from being moved
9701 * by fork()/COW.
9702 */
9703 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
9704 MAP_SHARED | MAP_ANONYMOUS, 0);
9705 if (IS_ERR((void *)hva))
9706 return PTR_ERR((void *)hva);
9707 } else {
9708 if (!slot->npages)
9709 return 0;
9710
9711 hva = 0;
9712 }
9713
9714 old = *slot;
9da0e4d5 9715 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1d8007bd 9716 struct kvm_userspace_memory_region m;
9da0e4d5 9717
1d8007bd
PB
9718 m.slot = id | (i << 16);
9719 m.flags = 0;
9720 m.guest_phys_addr = gpa;
f0d648bd 9721 m.userspace_addr = hva;
1d8007bd 9722 m.memory_size = size;
9da0e4d5
PB
9723 r = __kvm_set_memory_region(kvm, &m);
9724 if (r < 0)
9725 return r;
9726 }
9727
103c763c
EB
9728 if (!size)
9729 vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
f0d648bd 9730
9da0e4d5
PB
9731 return 0;
9732}
9733EXPORT_SYMBOL_GPL(__x86_set_memory_region);
9734
1d8007bd 9735int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
9da0e4d5
PB
9736{
9737 int r;
9738
9739 mutex_lock(&kvm->slots_lock);
1d8007bd 9740 r = __x86_set_memory_region(kvm, id, gpa, size);
9da0e4d5
PB
9741 mutex_unlock(&kvm->slots_lock);
9742
9743 return r;
9744}
9745EXPORT_SYMBOL_GPL(x86_set_memory_region);
9746
1aa9b957
JS
9747void kvm_arch_pre_destroy_vm(struct kvm *kvm)
9748{
9749 kvm_mmu_pre_destroy_vm(kvm);
9750}
9751
d19a9cd2
ZX
9752void kvm_arch_destroy_vm(struct kvm *kvm)
9753{
27469d29
AH
9754 if (current->mm == kvm->mm) {
9755 /*
9756 * Free memory regions allocated on behalf of userspace,
 9757 * unless the memory map has changed due to process exit
9758 * or fd copying.
9759 */
1d8007bd
PB
9760 x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
9761 x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
9762 x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
27469d29 9763 }
03543133
SS
9764 if (kvm_x86_ops->vm_destroy)
9765 kvm_x86_ops->vm_destroy(kvm);
c761159c
PX
9766 kvm_pic_destroy(kvm);
9767 kvm_ioapic_destroy(kvm);
d19a9cd2 9768 kvm_free_vcpus(kvm);
af1bae54 9769 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
66bb8a06 9770 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
13d268ca 9771 kvm_mmu_uninit_vm(kvm);
2beb6dad 9772 kvm_page_track_cleanup(kvm);
cbc0236a 9773 kvm_hv_destroy_vm(kvm);
d19a9cd2 9774}
0de10343 9775
5587027c 9776void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
db3fe4eb
TY
9777 struct kvm_memory_slot *dont)
9778{
9779 int i;
9780
d89cc617
TY
9781 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
9782 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
548ef284 9783 kvfree(free->arch.rmap[i]);
d89cc617 9784 free->arch.rmap[i] = NULL;
77d11309 9785 }
d89cc617
TY
9786 if (i == 0)
9787 continue;
9788
9789 if (!dont || free->arch.lpage_info[i - 1] !=
9790 dont->arch.lpage_info[i - 1]) {
548ef284 9791 kvfree(free->arch.lpage_info[i - 1]);
d89cc617 9792 free->arch.lpage_info[i - 1] = NULL;
db3fe4eb
TY
9793 }
9794 }
21ebbeda
XG
9795
9796 kvm_page_track_free_memslot(free, dont);
db3fe4eb
TY
9797}
9798
5587027c
AK
9799int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
9800 unsigned long npages)
db3fe4eb
TY
9801{
9802 int i;
9803
d89cc617 9804 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
92f94f1e 9805 struct kvm_lpage_info *linfo;
db3fe4eb
TY
9806 unsigned long ugfn;
9807 int lpages;
d89cc617 9808 int level = i + 1;
db3fe4eb
TY
9809
9810 lpages = gfn_to_index(slot->base_gfn + npages - 1,
9811 slot->base_gfn, level) + 1;
9812
d89cc617 9813 slot->arch.rmap[i] =
778e1cdd 9814 kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
254272ce 9815 GFP_KERNEL_ACCOUNT);
d89cc617 9816 if (!slot->arch.rmap[i])
77d11309 9817 goto out_free;
d89cc617
TY
9818 if (i == 0)
9819 continue;
77d11309 9820
254272ce 9821 linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
92f94f1e 9822 if (!linfo)
db3fe4eb
TY
9823 goto out_free;
9824
92f94f1e
XG
9825 slot->arch.lpage_info[i - 1] = linfo;
9826
db3fe4eb 9827 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
92f94f1e 9828 linfo[0].disallow_lpage = 1;
db3fe4eb 9829 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
92f94f1e 9830 linfo[lpages - 1].disallow_lpage = 1;
db3fe4eb
TY
9831 ugfn = slot->userspace_addr >> PAGE_SHIFT;
9832 /*
9833 * If the gfn and userspace address are not aligned wrt each
9834 * other, or if explicitly asked to, disable large page
9835 * support for this slot
9836 */
9837 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
9838 !kvm_largepages_enabled()) {
9839 unsigned long j;
9840
9841 for (j = 0; j < lpages; ++j)
92f94f1e 9842 linfo[j].disallow_lpage = 1;
db3fe4eb
TY
9843 }
9844 }
9845
21ebbeda
XG
9846 if (kvm_page_track_create_memslot(slot, npages))
9847 goto out_free;
9848
db3fe4eb
TY
9849 return 0;
9850
9851out_free:
d89cc617 9852 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
548ef284 9853 kvfree(slot->arch.rmap[i]);
d89cc617
TY
9854 slot->arch.rmap[i] = NULL;
9855 if (i == 0)
9856 continue;
9857
548ef284 9858 kvfree(slot->arch.lpage_info[i - 1]);
d89cc617 9859 slot->arch.lpage_info[i - 1] = NULL;
db3fe4eb
TY
9860 }
9861 return -ENOMEM;
9862}
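
A worked check of the sizing math above, with gfn_to_index() open-coded under the usual x86 definition KVM_HPAGE_GFN_SHIFT(level) == (level - 1) * 9 (stated as an assumption here, since that helper lives in the mmu headers):

/* Illustrative check of the lpages computation in kvm_arch_create_memslot(). */
#include <assert.h>

#define HPAGE_GFN_SHIFT(level)	(((level) - 1) * 9)

static unsigned long gfn_to_index(unsigned long gfn, unsigned long base_gfn,
				  int level)
{
	return (gfn >> HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	unsigned long base_gfn = 0, npages = 1024;	/* a 4 MiB slot */

	/* level 1: 4 KiB mappings, one rmap entry per page. */
	assert(gfn_to_index(base_gfn + npages - 1, base_gfn, 1) + 1 == 1024);
	/* level 2: 2 MiB mappings, the slot spans two of them. */
	assert(gfn_to_index(base_gfn + npages - 1, base_gfn, 2) + 1 == 2);
	return 0;
}
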
9863
15248258 9864void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
e59dbe09 9865{
e6dff7d1
TY
9866 /*
9867 * memslots->generation has been incremented.
9868 * mmio generation may have reached its maximum value.
9869 */
15248258 9870 kvm_mmu_invalidate_mmio_sptes(kvm, gen);
e59dbe09
TY
9871}
9872
f7784b8e
MT
9873int kvm_arch_prepare_memory_region(struct kvm *kvm,
9874 struct kvm_memory_slot *memslot,
09170a49 9875 const struct kvm_userspace_memory_region *mem,
7b6195a9 9876 enum kvm_mr_change change)
0de10343 9877{
f7784b8e
MT
9878 return 0;
9879}
9880
88178fd4
KH
9881static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
9882 struct kvm_memory_slot *new)
9883{
9884 /* Still write protect RO slot */
9885 if (new->flags & KVM_MEM_READONLY) {
9886 kvm_mmu_slot_remove_write_access(kvm, new);
9887 return;
9888 }
9889
9890 /*
9891 * Call kvm_x86_ops dirty logging hooks when they are valid.
9892 *
9893 * kvm_x86_ops->slot_disable_log_dirty is called when:
9894 *
9895 * - KVM_MR_CREATE with dirty logging is disabled
9896 * - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
9897 *
 9898 * The reason is that, with PML, we need to set the D-bit for any slot
 9899 * with dirty logging disabled in order to eliminate unnecessary GPA
0a03cbda 9900 * logging in the PML buffer (and a potential PML-buffer-full VMEXIT).
88178fd4 9901 * This guarantees that leaving PML enabled for the guest's lifetime
bdd303cb 9902 * adds no extra overhead while the guest runs with dirty logging
88178fd4
KH
 9903 * disabled for its memory slots.
9904 *
9905 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
9906 * to dirty logging mode.
9907 *
9908 * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
9909 *
9910 * In case of write protect:
9911 *
9912 * Write protect all pages for dirty logging.
9913 *
9914 * All the sptes including the large sptes which point to this
9915 * slot are set to readonly. We can not create any new large
9916 * spte on this slot until the end of the logging.
9917 *
9918 * See the comments in fast_page_fault().
9919 */
9920 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
9921 if (kvm_x86_ops->slot_enable_log_dirty)
9922 kvm_x86_ops->slot_enable_log_dirty(kvm, new);
9923 else
9924 kvm_mmu_slot_remove_write_access(kvm, new);
9925 } else {
9926 if (kvm_x86_ops->slot_disable_log_dirty)
9927 kvm_x86_ops->slot_disable_log_dirty(kvm, new);
9928 }
9929}
9930
f7784b8e 9931void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 9932 const struct kvm_userspace_memory_region *mem,
8482644a 9933 const struct kvm_memory_slot *old,
f36f3f28 9934 const struct kvm_memory_slot *new,
8482644a 9935 enum kvm_mr_change change)
f7784b8e 9936{
48c0e4e9 9937 if (!kvm->arch.n_requested_mmu_pages)
4d66623c
WY
9938 kvm_mmu_change_mmu_pages(kvm,
9939 kvm_mmu_calculate_default_mmu_pages(kvm));
1c91cad4 9940
3ea3b7fa
WL
9941 /*
9942 * Dirty logging tracks sptes in 4k granularity, meaning that large
9943 * sptes have to be split. If live migration is successful, the guest
9944 * in the source machine will be destroyed and large sptes will be
9945 * created in the destination. However, if the guest continues to run
9946 * in the source machine (for example if live migration fails), small
9947 * sptes will remain around and cause bad performance.
9948 *
9949 * Scan sptes if dirty logging has been stopped, dropping those
9950 * which can be collapsed into a single large-page spte. Later
9951 * page faults will create the large-page sptes.
319109a2
SC
9952 *
9953 * There is no need to do this in any of the following cases:
9954 * CREATE: No dirty mappings will already exist.
9955 * MOVE/DELETE: The old mappings will already have been cleaned up by
9956 * kvm_arch_flush_shadow_memslot()
3ea3b7fa 9957 */
319109a2 9958 if (change == KVM_MR_FLAGS_ONLY &&
3ea3b7fa
WL
9959 (old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
9960 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
9961 kvm_mmu_zap_collapsible_sptes(kvm, new);
9962
c972f3b1 9963 /*
88178fd4 9964 * Set up write protection and/or dirty logging for the new slot.
c126d94f 9965 *
88178fd4
KH
9966 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
 9967 * been zapped, so no dirty logging work is needed for the old slot. For
9968 * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the
9969 * new and it's also covered when dealing with the new slot.
f36f3f28
PB
9970 *
9971 * FIXME: const-ify all uses of struct kvm_memory_slot.
c972f3b1 9972 */
88178fd4 9973 if (change != KVM_MR_DELETE)
f36f3f28 9974 kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
0de10343 9975}
1d737c8a 9976
2df72e9b 9977void kvm_arch_flush_shadow_all(struct kvm *kvm)
34d4cb8f 9978{
7390de1e 9979 kvm_mmu_zap_all(kvm);
34d4cb8f
MT
9980}
9981
2df72e9b
MT
9982void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
9983 struct kvm_memory_slot *slot)
9984{
ae7cd873 9985 kvm_page_track_flush_slot(kvm, slot);
2df72e9b
MT
9986}
9987
e6c67d8c
LA
9988static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
9989{
9990 return (is_guest_mode(vcpu) &&
9991 kvm_x86_ops->guest_apic_has_interrupt &&
9992 kvm_x86_ops->guest_apic_has_interrupt(vcpu));
9993}
9994
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;

	if (kvm_apic_has_events(vcpu))
		return true;

	if (vcpu->arch.pv.pv_unhalted)
		return true;

	if (vcpu->arch.exception.pending)
		return true;

	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
	    (vcpu->arch.nmi_pending &&
	     kvm_x86_ops->nmi_allowed(vcpu)))
		return true;

	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
		return true;

	if (kvm_arch_interrupt_allowed(vcpu) &&
	    (kvm_cpu_has_interrupt(vcpu) ||
	     kvm_guest_apic_has_interrupt(vcpu)))
		return true;

	if (kvm_hv_has_stimer_pending(vcpu))
		return true;

	return false;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}

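/*
 * Note: kvm_arch_dy_runnable() below is a lighter-weight approximation of
 * kvm_vcpu_has_events(); it is used when picking a directed-yield candidate
 * and therefore only consults state that is cheap and safe to read from
 * another vCPU's context.
 */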
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
		return true;

	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
	    kvm_test_request(KVM_REQ_SMI, vcpu) ||
	    kvm_test_request(KVM_REQ_EVENT, vcpu))
		return true;

	if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
		return true;

	return false;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.preempted_in_kernel;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

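/*
 * Worked example for kvm_get_linear_rip() (a sketch): outside 64-bit mode
 * the linear RIP is CS.base plus RIP, truncated to 32 bits, so with
 * CS.base = 0xf0000 and RIP = 0xfff0 the function returns 0xffff0.
 */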
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{
	if (is_64_bit_mode(vcpu))
		return kvm_rip_read(vcpu);
	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
		     kvm_rip_read(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_linear_rip);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	return kvm_get_linear_rip(vcpu) == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
}

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__kvm_set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	int r;

	if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
	    work->wakeup_all)
		return;

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return;

	if (!vcpu->arch.mmu->direct_map &&
	    work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
		return;

	vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);
}

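/*
 * vcpu->arch.apf.gfns[] is a small open-addressing hash table with linear
 * probing that tracks gfns with outstanding async page faults; ~0 marks a
 * free slot.  A sketch of a round trip through the helpers below (the gfn
 * value is arbitrary):
 *
 *	kvm_add_async_pf_gfn(vcpu, 0x1234);
 *	WARN_ON(!kvm_find_async_pf_gfn(vcpu, 0x1234));
 *	kvm_del_async_pf_gfn(vcpu, 0x1234);
 */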
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}

static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}

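/*
 * Deleting from an open-addressed table cannot simply clear the slot, or
 * later lookups would stop probing at the new hole.  The loop below uses
 * the classic backward-shift technique: after emptying slot i, walk the
 * probe chain and move into the hole any entry whose home bucket k does
 * not lie cyclically in ]i, j], repeating until a free slot terminates
 * the chain.
 */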
static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}

static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
				      sizeof(val));
}

static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val,
				     sizeof(u32));
}

static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
		return false;

	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
	    (vcpu->arch.apf.send_user_only &&
	     kvm_x86_ops->get_cpl(vcpu) == 0))
		return false;

	return true;
}

bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
	if (unlikely(!lapic_in_kernel(vcpu) ||
		     kvm_event_needs_reinjection(vcpu) ||
		     vcpu->arch.exception.pending))
		return false;

	if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
		return false;

	/*
	 * If interrupts are off we cannot even use an artificial
	 * halt state.
	 */
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

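/*
 * Summary of the paravirt async page fault protocol implemented below:
 * when a host page is missing, KVM writes KVM_PV_REASON_PAGE_NOT_PRESENT
 * into the guest's shared apf.data slot and injects a #PF whose address
 * (CR2) is a token rather than a real fault address; once the page is
 * ready, KVM_PV_REASON_PAGE_READY is delivered with the same token so the
 * guest can wake the task it put to sleep.
 */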
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (kvm_can_deliver_async_pf(vcpu) &&
	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		fault.async_page_fault = true;
		kvm_inject_page_fault(vcpu, &fault);
	} else {
		/*
		 * It is not possible to deliver a paravirtualized asynchronous
		 * page fault, but putting the guest in an artificial halt state
		 * can be beneficial nevertheless: if an interrupt arrives, we
		 * can deliver it timely and perhaps the guest will schedule
		 * another process.  When the instruction that triggered a page
		 * fault is retried, hopefully the page will be ready in the host.
		 */
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
	}
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct x86_exception fault;
	u32 val;

	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);

	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
	    !apf_get_user(vcpu, &val)) {
		if (val == KVM_PV_REASON_PAGE_NOT_PRESENT &&
		    vcpu->arch.exception.pending &&
		    vcpu->arch.exception.nr == PF_VECTOR &&
		    !apf_put_user(vcpu, 0)) {
			vcpu->arch.exception.injected = false;
			vcpu->arch.exception.pending = false;
			vcpu->arch.exception.nr = 0;
			vcpu->arch.exception.has_error_code = false;
			vcpu->arch.exception.error_code = 0;
			vcpu->arch.exception.has_payload = false;
			vcpu->arch.exception.payload = 0;
		} else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
			fault.vector = PF_VECTOR;
			fault.error_code_valid = true;
			fault.error_code = 0;
			fault.nested_page_fault = false;
			fault.address = work->arch.token;
			fault.async_page_fault = true;
			kvm_inject_page_fault(vcpu, &fault);
		}
	}
	vcpu->arch.apf.halted = false;
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;
	else
		return kvm_can_do_async_pf(vcpu);
}

void kvm_arch_start_assignment(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);

void kvm_arch_end_assignment(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);

bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

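/*
 * Posted-interrupt bypass: registering a producer (e.g. a VFIO device
 * interrupt) programs the IRTE for irqfd->gsi into posted mode (final
 * update_pi_irte() argument == 1) so the device can signal the guest
 * without a VM exit; unregistering flips the IRTE back to remapped mode.
 */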
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	irqfd->producer = prod;

	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
					   prod->irq, irqfd->gsi, 1);
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	int ret;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	WARN_ON(irqfd->producer != prod);
	irqfd->producer = NULL;

	/*
	 * When the producer of a consumer is unregistered, we change back to
	 * remapped mode, so we can re-use the current implementation
	 * when the irq is masked/disabled or the consumer side (KVM
	 * in this case) doesn't want to receive the interrupts.
	 */
	ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
	if (ret)
		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
		       " fails: %d\n", irqfd->consumer.token, ret);
}

int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set)
{
	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
}

bool kvm_vector_hashing_enabled(void)
{
	return vector_hashing;
}
EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);

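/*
 * Bit 0 of MSR_KVM_POLL_CONTROL is set by the guest to permit host-side
 * halt polling; kvm_arch_no_poll() below reports "no polling" when the
 * guest has cleared it.
 */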
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);

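/*
 * Example (a sketch): for a guest whose CPUID advertises neither
 * SPEC_CTRL/AMD_IBRS nor the SSBD feature bits, kvm_spec_ctrl_valid_bits()
 * returns 0, and vendor code can use the mask to reject guest writes of
 * unsupported bits to MSR_IA32_SPEC_CTRL.
 */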
u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
{
	uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;

	/* The STIBP bit doesn't fault even if it's not advertised */
	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
	    !boot_cpu_has(X86_FEATURE_AMD_IBRS))
		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);

	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
		bits &= ~SPEC_CTRL_SSBD;
	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		bits &= ~SPEC_CTRL_SSBD;

	return bits;
}
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);