// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/objtool.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
#include <linux/entry-kvm.h>
#include <asm/cpu_device_id.h>
#include <asm/debugreg.h>
#include <asm/fpu/internal.h>
#include <asm/idtentry.h>
#include <asm/irq_remapping.h>
#include <asm/kexec.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/mshyperv.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/virtext.h>
#include "capabilities.h"
#include "kvm_onhyperv.h"
#include "kvm_cache_regs.h"
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly enable_vnmi = 1;
module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);

bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

module_param(enable_apicv, bool, S_IRUGO);

/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be hypervisors for their own guests. If nested=0, guests may not
 * use VMX instructions.
 */
static bool __read_mostly nested = 1;
module_param(nested, bool, S_IRUGO);

bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);

static bool __read_mostly dump_invalid_vmcs = 0;
module_param(dump_invalid_vmcs, bool, 0644);
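
/*
 * Example (illustrative only, not part of the original file): most of the
 * knobs above are read-only module parameters, so they must be given at
 * load time, e.g.:
 *
 *   modprobe kvm_intel ept=1 vpid=1 nested=1 pml=0
 *
 * dump_invalid_vmcs is registered with mode 0644 and can therefore be
 * toggled at runtime via
 * /sys/module/kvm_intel/parameters/dump_invalid_vmcs.
 */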
#define MSR_BITMAP_MODE_X2APIC		1
#define MSR_BITMAP_MODE_X2APIC_APICV	2

#define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL

/* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
static int __read_mostly cpu_preemption_timer_multi;
static bool __read_mostly enable_preemption_timer = 1;
#ifdef CONFIG_X86_64
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif

extern bool __read_mostly allow_smaller_maxphyaddr;
module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON				\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)

#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
	RTIT_STATUS_BYTECNT))
/*
 * List of MSRs that can be directly passed to the guest.
 * In addition to these, x2apic and PT MSRs are handled specially.
 */
static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
	MSR_IA32_SYSENTER_CS,
	MSR_IA32_SYSENTER_ESP,
	MSR_IA32_SYSENTER_EIP,
	MSR_CORE_C3_RESIDENCY,
	MSR_CORE_C6_RESIDENCY,
	MSR_CORE_C7_RESIDENCY,
};
/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. According to test, this time is usually smaller than
 *             128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held
 *             for less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer to SDM volume 3b section 21.6.13 & 22.1.3.
 */
static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
module_param(ple_gap, uint, 0444);

static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, uint, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, uint, 0444);

/* Default resets per-vcpu window every exit to ple_window. */
static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, uint, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);
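
/*
 * Illustrative example of the window dynamics (values assumed for the sake
 * of the example, not taken from this file): with ple_window = 4096 and
 * ple_window_grow = 2, each PAUSE-loop exit doubles the per-vCPU window,
 * 4096 -> 8192 -> 16384 -> ..., capped at ple_window_max, while a shrink
 * with ple_window_shrink = 0 resets it back to ple_window.
 */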
/* Default is SYSTEM mode, 1 for host-guest mode */
int __read_mostly pt_mode = PT_MODE_SYSTEM;
module_param(pt_mode, int, S_IRUGO);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);

/* Storage for pre module init parameter parsing */
static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
static const struct {
	const char *option;
	bool for_parse;
} vmentry_l1d_param[] = {
	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};
#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
	struct page *page;
	unsigned int i;

	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
		return 0;
	}

	if (!enable_ept) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
		return 0;
	}

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
		u64 msr;

		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
		if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
			l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
			return 0;
		}
	}

	/* If set to auto use the default l1tf mitigation method */
	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
		switch (l1tf_mitigation) {
		case L1TF_MITIGATION_OFF:
			l1tf = VMENTER_L1D_FLUSH_NEVER;
			break;
		case L1TF_MITIGATION_FLUSH_NOWARN:
		case L1TF_MITIGATION_FLUSH:
		case L1TF_MITIGATION_FLUSH_NOSMT:
			l1tf = VMENTER_L1D_FLUSH_COND;
			break;
		case L1TF_MITIGATION_FULL:
		case L1TF_MITIGATION_FULL_FORCE:
			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
			break;
		}
	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
	}

	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		/*
		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
		 * lifetime and so should not be charged to a memcg.
		 */
		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
		if (!page)
			return -ENOMEM;
		vmx_l1d_flush_pages = page_address(page);

		/*
		 * Initialize each page with a different pattern in
		 * order to protect against KSM in the nested
		 * virtualization case.
		 */
		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
			       PAGE_SIZE);
		}
	}

	l1tf_vmx_mitigation = l1tf;

	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
		static_branch_enable(&vmx_l1d_should_flush);
	else
		static_branch_disable(&vmx_l1d_should_flush);

	if (l1tf == VMENTER_L1D_FLUSH_COND)
		static_branch_enable(&vmx_l1d_flush_cond);
	else
		static_branch_disable(&vmx_l1d_flush_cond);
	return 0;
}
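
/*
 * Summary of the mapping implemented above for the "auto" case, shown here
 * for reference only:
 *
 *   l1tf_mitigation              vmentry L1D flush mode
 *   L1TF_MITIGATION_OFF       -> VMENTER_L1D_FLUSH_NEVER
 *   L1TF_MITIGATION_FLUSH*    -> VMENTER_L1D_FLUSH_COND
 *   L1TF_MITIGATION_FULL*     -> VMENTER_L1D_FLUSH_ALWAYS
 */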
static int vmentry_l1d_flush_parse(const char *s)
{
	unsigned int i;

	if (s) {
		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
			if (vmentry_l1d_param[i].for_parse &&
			    sysfs_streq(s, vmentry_l1d_param[i].option))
				return i;
		}
	}
	return -EINVAL;
}
static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
	int l1tf, ret;

	l1tf = vmentry_l1d_flush_parse(s);
	if (l1tf < 0)
		return l1tf;

	if (!boot_cpu_has(X86_BUG_L1TF))
		return 0;

	/*
	 * Has vmx_init() run already? If not then this is the pre init
	 * parameter parsing. In that case just store the value and let
	 * vmx_init() do the proper setup after enable_ept has been
	 * established.
	 */
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
		vmentry_l1d_flush_param = l1tf;
		return 0;
	}

	mutex_lock(&vmx_l1d_flush_mutex);
	ret = vmx_setup_l1d_flush(l1tf);
	mutex_unlock(&vmx_l1d_flush_mutex);
	return ret;
}
static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
		return sprintf(s, "???\n");

	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}
static const struct kernel_param_ops vmentry_l1d_flush_ops = {
	.set = vmentry_l1d_flush_set,
	.get = vmentry_l1d_flush_get,
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
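
/*
 * Example (illustrative): because vmentry_l1d_flush is registered with
 * custom ops and mode 0644, it can be changed after module load, e.g.
 * "echo cond > /sys/module/kvm_intel/parameters/vmentry_l1d_flush"; the
 * set handler above then re-runs vmx_setup_l1d_flush() under
 * vmx_l1d_flush_mutex.
 */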
static u32 vmx_segment_access_rights(struct kvm_segment *var);

void vmx_vmexit(void);
#define vmx_insn_failed(fmt...)		\
do {					\
	WARN_ONCE(1, fmt);		\
	pr_warn_ratelimited(fmt);	\
} while (0)
asmlinkage void vmread_error(unsigned long field, bool fault)
{
	if (fault)
		kvm_spurious_fault();
	else
		vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
}
noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",
			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
}
noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr);
}
noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr);
}
noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
{
	vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
			ext, vpid, gva);
}
noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
{
	vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
			ext, eptp, gpa);
}
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCSs loaded on that CPU. This is
 * needed when a CPU is brought down, and we need to VMCLEAR all VMCSs
 * loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
struct vmcs_config vmcs_config;
struct vmx_capability vmx_capability;
#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}
static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}
static unsigned long host_idt_base;
#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);

static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
	struct hv_enlightened_vmcs *evmcs;
	struct hv_partition_assist_pg **p_hv_pa_pg =
			&to_kvm_hv(vcpu->kvm)->hv_pa_pg;
	/*
	 * Synthetic VM-exits are not enabled in the current code, so all
	 * eVMCSs in a single VM share the same assist page.
	 */
	if (!*p_hv_pa_pg)
		*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);

	if (!*p_hv_pa_pg)
		return -ENOMEM;

	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;

	evmcs->partition_assist_page =
		__pa(*p_hv_pa_pg);
	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;

	return 0;
}

#endif /* IS_ENABLED(CONFIG_HYPERV) */
/*
 * Comment's format: document - errata name - stepping - processor name.
 * Refer from
 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
 */
static u32 vmx_preemption_cpu_tfms[] = {
/* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
0x000206E6,
/* 323056.pdf - AAX65  - C2 - Xeon L3406 */
/* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
/* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
0x00020652,
/* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
0x00020655,
/* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
/* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
/*
 * 320767.pdf - AAP86  - B1 -
 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
 */
0x000106E5,
/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
0x000106A0,
/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
0x000106A1,
/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
0x000106A4,
/* 321333.pdf - AAM126 - D0 - Xeon 3500 */
/* 321324.pdf - AAK139 - D0 - Xeon 5500 */
/* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
0x000106A5,
/* Xeon E3-1220 V2 */
0x000306A8,
};
static inline bool cpu_has_broken_vmx_preemption_timer(void)
{
	u32 eax = cpuid_eax(0x00000001), i;

	/* Clear the reserved bits */
	eax &= ~(0x3U << 14 | 0xfU << 28);
	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
		if (eax == vmx_preemption_cpu_tfms[i])
			return true;

	return false;
}
static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
	return flexpriority_enabled && lapic_in_kernel(vcpu);
}
static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}
static int possible_passthrough_msr_slot(u32 msr)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
		if (vmx_possible_passthrough_msrs[i] == msr)
			return i;

	return -ENOENT;
}
static bool is_valid_passthrough_msr(u32 msr)
{
	bool r;

	switch (msr) {
	case 0x800 ... 0x8ff:
		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
		return true;
	case MSR_IA32_RTIT_STATUS:
	case MSR_IA32_RTIT_OUTPUT_BASE:
	case MSR_IA32_RTIT_OUTPUT_MASK:
	case MSR_IA32_RTIT_CR3_MATCH:
	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
		/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
	case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
	case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
		return true;
	}

	r = possible_passthrough_msr_slot(msr) != -ENOENT;

	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);

	return r;
}
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = kvm_find_user_return_msr(msr);
	if (i >= 0)
		return &vmx->guest_uret_msrs[i];
	return NULL;
}
static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
				  struct vmx_uret_msr *msr, u64 data)
{
	unsigned int slot = msr - vmx->guest_uret_msrs;
	int ret = 0;

	u64 old_msr_data = msr->data;
	msr->data = data;
	if (msr->load_into_hardware) {
		preempt_disable();
		ret = kvm_set_user_return_msr(slot, msr->data, msr->mask);
		preempt_enable();
		if (ret)
			msr->data = old_msr_data;
	}
	return ret;
}
#ifdef CONFIG_KEXEC_CORE
static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#endif /* CONFIG_KEXEC_CORE */
static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;

	vmcs_clear(loaded_vmcs->vmcs);
	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
		vmcs_clear(loaded_vmcs->shadow_vmcs);

	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * Ensure all writes to loaded_vmcs, including deleting it from its
	 * current percpu list, complete before setting loaded_vmcs->cpu to
	 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
	 * and add loaded_vmcs to its percpu list before it's deleted from
	 * this cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
	 */
	smp_wmb();

	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}
static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
		kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}
static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}
static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}
static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}
static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does.
	 */
	if (enable_vmware_backdoor)
		eb |= (1u << GP_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (!vmx_need_pf_intercept(vcpu))
		eb &= ~(1u << PF_VECTOR);

	/* When we are running a nested L2 guest and L1 specified for it a
	 * certain exception bitmap, we must trap the same exceptions and pass
	 * them to L1. When running L2, we will only handle the exceptions
	 * specified above if L1 did not want them.
	 */
	if (is_guest_mode(vcpu))
		eb |= get_vmcs12(vcpu)->exception_bitmap;
	else {
		int mask = 0, match = 0;

		if (enable_ept && (eb & (1u << PF_VECTOR))) {
			/*
			 * If EPT is enabled, #PF is currently only intercepted
			 * if MAXPHYADDR is smaller on the guest than on the
			 * host.  In that case we only care about present,
			 * non-reserved faults.  For vmcs02, however, PFEC_MASK
			 * and PFEC_MATCH are set in prepare_vmcs02_rare.
			 */
			mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
			match = PFERR_PRESENT_MASK;
		}
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
	}

	vmcs_write32(EXCEPTION_BITMAP, eb);
}
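
/*
 * Worked example for the PFEC mask/match logic above (hypothetical fault):
 * with mask = P|RSVD and match = P, a present, non-reserved #PF (PFEC has P
 * set, RSVD clear) satisfies (PFEC & mask) == match and causes a VM-exit,
 * while a not-present fault ((PFEC & mask) == 0) or a reserved-bit fault
 * ((PFEC & mask) == P|RSVD) is delivered directly to the guest.
 */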
/*
 * Check if MSR is intercepted for currently loaded MSR bitmap.
 */
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}
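
/*
 * VMX MSR bitmap layout relied on above (per the SDM): bytes 0x000-0x3ff
 * hold the read bitmap for MSRs 0x00000000-0x00001fff, bytes 0x400-0x7ff
 * the read bitmap for 0xc0000000-0xc0001fff, and bytes 0x800-0xbff and
 * 0xc00-0xfff the corresponding write bitmaps, which is why the write
 * check above indexes at offsets 0x800 and 0xc00.
 */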
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit)
{
	vm_entry_controls_clearbit(vmx, entry);
	vm_exit_controls_clearbit(vmx, exit);
}
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
{
	unsigned int i;

	for (i = 0; i < m->nr; ++i) {
		if (m->val[i].index == msr)
			return i;
	}
	return -ENOENT;
}
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	int i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer()) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl()) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
			return;
		}
		break;
	}
	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
	if (i < 0)
		goto skip_guest;
	--m->guest.nr;
	m->guest.val[i] = m->guest.val[m->guest.nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);

skip_guest:
	i = vmx_find_loadstore_msr_slot(&m->host, msr);
	if (i < 0)
		return;

	--m->host.nr;
	m->host.val[i] = m->host.val[m->host.nr];
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
}
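
/*
 * Note on the removal scheme above: the autoload lists are unordered, so
 * an entry is removed by overwriting it with the last entry and shrinking
 * the count, e.g. (hypothetical) removing B from [A, B, C] yields [A, C].
 */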
static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit,
		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
		u64 guest_val, u64 host_val)
{
	vmcs_write64(guest_val_vmcs, guest_val);
	if (host_val_vmcs != HOST_IA32_EFER)
		vmcs_write64(host_val_vmcs, host_val);
	vm_entry_controls_setbit(vmx, entry);
	vm_exit_controls_setbit(vmx, exit);
}
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val, bool entry_only)
{
	int i, j = 0;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer()) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl()) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_IA32_PEBS_ENABLE:
		/* PEBS needs a quiescent period after being disabled (to write
		 * a record). Disabling PEBS through VMX MSR swapping doesn't
		 * provide that period, so a CPU could write host's record into
		 * guest's memory.
		 */
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
	}

	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
	if (!entry_only)
		j = vmx_find_loadstore_msr_slot(&m->host, msr);

	if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
	    (j < 0 &&  m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	}
	if (i < 0) {
		i = m->guest.nr++;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
	}
	m->guest.val[i].index = msr;
	m->guest.val[i].value = guest_val;

	if (entry_only)
		return;

	if (j < 0) {
		j = m->host.nr++;
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
	}
	m->host.val[j].index = msr;
	m->host.val[j].value = host_val;
}
static bool update_transition_efer(struct vcpu_vmx *vmx)
{
	u64 guest_efer = vmx->vcpu.arch.efer;
	u64 ignore_bits = 0;
	int i;

	/* Shadow paging assumes NX to be available.  */
	if (!enable_ept)
		guest_efer |= EFER_NX;

	/*
	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
	 */
	ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif

	/*
	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
	 * On CPUs that support "load IA32_EFER", always switch EFER
	 * atomically, since it's faster than switching it manually.
	 */
	if (cpu_has_load_ia32_efer() ||
	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		if (guest_efer != host_efer)
			add_atomic_switch_msr(vmx, MSR_EFER,
					      guest_efer, host_efer, false);
		else
			clear_atomic_switch_msr(vmx, MSR_EFER);
		return false;
	}

	i = kvm_find_user_return_msr(MSR_EFER);
	if (i < 0)
		return false;

	clear_atomic_switch_msr(vmx, MSR_EFER);

	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;

	vmx->guest_uret_msrs[i].data = guest_efer;
	vmx->guest_uret_msrs[i].mask = ~ignore_bits;

	return true;
}
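
/*
 * Worked example for the user-return path above (hypothetical values): with
 * ignore_bits = SCE|LMA|LME, the value handed to the user-return machinery
 * is (guest_efer & ~ignore_bits) | (host_efer & ignore_bits), i.e. the
 * guest's EFER with the host's "don't care" bits substituted in, and
 * mask = ~ignore_bits tells kvm_set_user_return_msr() which bits actually
 * need to differ before a WRMSR is issued.
 */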
#ifdef CONFIG_X86_32
/*
 * On 32-bit kernels, VM exits still load the FS and GS bases from the
 * VMCS rather than the segment table. KVM uses this helper to figure
 * out the current bases to poke them into the VMCS before entry.
 */
static unsigned long segment_base(u16 selector)
{
	struct desc_struct *table;
	unsigned long v;

	if (!(selector & ~SEGMENT_RPL_MASK))
		return 0;

	table = get_current_gdt_ro();

	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
			return 0;

		table = (struct desc_struct *)segment_base(ldt_selector);
	}
	v = get_desc_base(&table[selector >> 3]);
	return v;
}
#endif
static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
{
	return vmx_pt_mode_is_host_guest() &&
	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
}
static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
{
	/* The base must be 128-byte aligned and a legal physical address. */
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
}
static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
{
	u32 i;

	wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
	wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
	for (i = 0; i < addr_range; i++) {
		wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
		wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
	}
}
static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
{
	u32 i;

	rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
	rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
	for (i = 0; i < addr_range; i++) {
		rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
		rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
	}
}
static void pt_guest_enter(struct vcpu_vmx *vmx)
{
	if (vmx_pt_mode_is_system())
		return;

	/*
	 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
	 * Save host state before VM entry.
	 */
	rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
		wrmsrl(MSR_IA32_RTIT_CTL, 0);
		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
	}
}
static void pt_guest_exit(struct vcpu_vmx *vmx)
{
	if (vmx_pt_mode_is_system())
		return;

	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
	}

	/* Reload host state (IA32_RTIT_CTL will be cleared on VM exit). */
	wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
}
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base)
{
	if (unlikely(fs_sel != host->fs_sel)) {
		if (!(fs_sel & 7))
			vmcs_write16(HOST_FS_SELECTOR, fs_sel);
		else
			vmcs_write16(HOST_FS_SELECTOR, 0);
		host->fs_sel = fs_sel;
	}
	if (unlikely(gs_sel != host->gs_sel)) {
		if (!(gs_sel & 7))
			vmcs_write16(HOST_GS_SELECTOR, gs_sel);
		else
			vmcs_write16(HOST_GS_SELECTOR, 0);
		host->gs_sel = gs_sel;
	}
	if (unlikely(fs_base != host->fs_base)) {
		vmcs_writel(HOST_FS_BASE, fs_base);
		host->fs_base = fs_base;
	}
	if (unlikely(gs_base != host->gs_base)) {
		vmcs_writel(HOST_GS_BASE, gs_base);
		host->gs_base = gs_base;
	}
}
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs_host_state *host_state;
#ifdef CONFIG_X86_64
	int cpu = raw_smp_processor_id();
#endif
	unsigned long fs_base, gs_base;
	u16 fs_sel, gs_sel;
	int i;

	vmx->req_immediate_exit = false;

	/*
	 * Note that guest MSRs to be saved/restored can also be changed
	 * when guest state is loaded. This happens when guest transitions
	 * to/from long-mode by setting MSR_EFER.LMA.
	 */
	if (!vmx->guest_uret_msrs_loaded) {
		vmx->guest_uret_msrs_loaded = true;
		for (i = 0; i < kvm_nr_uret_msrs; ++i) {
			if (!vmx->guest_uret_msrs[i].load_into_hardware)
				continue;

			kvm_set_user_return_msr(i,
						vmx->guest_uret_msrs[i].data,
						vmx->guest_uret_msrs[i].mask);
		}
	}

	if (vmx->nested.need_vmcs12_to_shadow_sync)
		nested_sync_vmcs12_to_shadow(vcpu);

	if (vmx->guest_state_loaded)
		return;

	host_state = &vmx->loaded_vmcs->host_state;

	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	host_state->ldt_sel = kvm_read_ldt();

#ifdef CONFIG_X86_64
	savesegment(ds, host_state->ds_sel);
	savesegment(es, host_state->es_sel);

	gs_base = cpu_kernelmode_gs_base(cpu);
	if (likely(is_64bit_mm(current->mm))) {
		current_save_fsgs();
		fs_sel = current->thread.fsindex;
		gs_sel = current->thread.gsindex;
		fs_base = current->thread.fsbase;
		vmx->msr_host_kernel_gs_base = current->thread.gsbase;
	} else {
		savesegment(fs, fs_sel);
		savesegment(gs, gs_sel);
		fs_base = read_msr(MSR_FS_BASE);
		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
	}

	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
	savesegment(fs, fs_sel);
	savesegment(gs, gs_sel);
	fs_base = segment_base(fs_sel);
	gs_base = segment_base(gs_sel);
#endif

	vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
	vmx->guest_state_loaded = true;
}
static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
{
	struct vmcs_host_state *host_state;

	if (!vmx->guest_state_loaded)
		return;

	host_state = &vmx->loaded_vmcs->host_state;

	++vmx->vcpu.stat.host_state_reload;

#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
		kvm_load_ldt(host_state->ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(host_state->gs_sel);
#else
		loadsegment(gs, host_state->gs_sel);
#endif
	}
	if (host_state->fs_sel & 7)
		loadsegment(fs, host_state->fs_sel);
#ifdef CONFIG_X86_64
	if (unlikely(host_state->ds_sel | host_state->es_sel)) {
		loadsegment(ds, host_state->ds_sel);
		loadsegment(es, host_state->es_sel);
	}
#endif
	invalidate_tss_limit();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	load_fixmap_gdt(raw_smp_processor_id());
	vmx->guest_state_loaded = false;
	vmx->guest_uret_msrs_loaded = false;
}
#ifdef CONFIG_X86_64
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
	preempt_disable();
	if (vmx->guest_state_loaded)
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	preempt_enable();
	return vmx->msr_guest_kernel_gs_base;
}

static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
{
	preempt_disable();
	if (vmx->guest_state_loaded)
		wrmsrl(MSR_KERNEL_GS_BASE, data);
	preempt_enable();
	vmx->msr_guest_kernel_gs_base = data;
}
#endif
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
	struct vmcs *prev;

	if (!already_loaded) {
		loaded_vmcs_clear(vmx->loaded_vmcs);
		local_irq_disable();

		/*
		 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
		 * this cpu's percpu list, otherwise it may not yet be deleted
		 * from its previous cpu's percpu list. Pairs with the
		 * smp_wmb() in __loaded_vmcs_clear().
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		local_irq_enable();
	}

	prev = per_cpu(current_vmcs, cpu);
	if (prev != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);

		/*
		 * No indirect branch prediction barrier needed when switching
		 * the active VMCS within a guest, e.g. on nested VM-Enter.
		 * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
		 */
		if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
			indirect_branch_prediction_barrier();
	}

	if (!already_loaded) {
		void *gdt = get_current_gdt_ro();
		unsigned long sysenter_esp;

		/*
		 * Flush all EPTP/VPID contexts, the new pCPU may have stale
		 * TLB entries from its previous association with the vCPU.
		 */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.  See 22.2.4.
		 */
		vmcs_writel(HOST_TR_BASE,
			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		vmx->loaded_vmcs->cpu = cpu;
	}
}
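
/*
 * Illustration of the race the rmb/wmb pair above closes (hypothetical
 * interleaving): without the barriers, the CPU running this function could
 * observe loaded_vmcs->cpu == -1 (set by __loaded_vmcs_clear() on the old
 * CPU) before the old CPU's list_del() is visible, and list_add() the same
 * loaded_vmcs to its own percpu list while it is still on the old CPU's
 * list, corrupting both lists.
 */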
/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);

	vmx_vcpu_pi_load(vcpu, cpu);

	vmx->host_debugctlmsr = get_debugctlmsr();
}
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_vcpu_pi_put(vcpu);

	vmx_prepare_switch_to_host(to_vmx(vcpu));
}
bool vmx_emulation_required(struct kvm_vcpu *vcpu)
{
	return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
}
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long rflags, save_rflags;

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (vmx->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = vmx->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		vmx->rflags = rflags;
	}
	return vmx->rflags;
}
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long old_rflags;

	if (is_unrestricted_guest(vcpu)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
		vmx->rflags = rflags;
		vmcs_writel(GUEST_RFLAGS, rflags);
		return;
	}

	old_rflags = vmx_get_rflags(vcpu);
	vmx->rflags = rflags;
	if (vmx->rmode.vm86_active) {
		vmx->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);

	if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
		vmx->emulation_required = vmx_emulation_required(vcpu);
}
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= KVM_X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= KVM_X86_SHADOW_INT_MOV_SS;

	return ret;
}
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	else if (mask & KVM_X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if (interruptibility != interruptibility_old)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}
static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long value;

	/*
	 * Any MSR write that attempts to change bits marked reserved will
	 * cause a #GP fault.
	 */
	if (data & vmx->pt_desc.ctl_bitmask)
		return 1;

	/*
	 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
	 * result in a #GP unless the same write also clears TraceEn.
	 */
	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
		((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
		return 1;

	/*
	 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this bit
	 * and FabricEn would cause #GP, if
	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
	 */
	if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
		!(data & RTIT_CTL_FABRIC_EN) &&
		!intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_single_range_output))
		return 1;

	/*
	 * Check MTCFreq, CycThresh and PSBFreq encodings: any MSR write
	 * that uses encodings marked reserved will cause a #GP fault.
	 */
	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
			!test_bit((data & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET, &value))
		return 1;
	value = intel_pt_validate_cap(vmx->pt_desc.caps,
						PT_CAP_cycle_thresholds);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
			!test_bit((data & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET, &value))
		return 1;
	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
			!test_bit((data & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET, &value))
		return 1;

	/*
	 * If ADDRx_CFG is reserved, or the encoding is > 2, the write will
	 * cause a #GP fault.
	 */
	value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
	if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2))
		return 1;
	value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
	if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2))
		return 1;
	value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
	if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2))
		return 1;
	value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
	if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2))
		return 1;

	return 0;
}
static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
{
	/*
	 * Emulation of instructions in SGX enclaves is impossible as RIP does
	 * not point at the failing instruction, and even if it did, the code
	 * stream is inaccessible.  Inject #UD instead of exiting to userspace
	 * so that guest userspace can't DoS the guest simply by triggering
	 * emulation (enclaves are CPL3 only).
	 */
	if (to_vmx(vcpu)->exit_reason.enclave_mode) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return false;
	}
	return true;
}
static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
	unsigned long rip, orig_rip;
	u32 instr_len;

	/*
	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
	 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
	 * set when EPT misconfig occurs.  In practice, real hardware updates
	 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
	 * (namely Hyper-V) don't set it due to it being undefined behavior,
	 * i.e. we end up advancing IP with some random value.
	 */
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
		instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);

		/*
		 * Emulating an enclave's instructions isn't supported as KVM
		 * cannot access the enclave's memory or its true RIP, e.g. the
		 * vmcs.GUEST_RIP points at the exit point of the enclave, not
		 * the RIP that actually triggered the VM-Exit.  But, because
		 * most instructions that cause VM-Exit will #UD in an enclave,
		 * most instruction-based VM-Exits simply do not occur.
		 *
		 * There are a few exceptions, notably the debug instructions
		 * INT1ICEBRK and INT3, as they are allowed in debug enclaves
		 * and generate #DB/#BP as expected, which KVM might intercept.
		 * But again, the CPU does the dirty work and saves an instr
		 * length of zero so VMMs don't shoot themselves in the foot.
		 * WARN if KVM tries to skip a non-zero length instruction on
		 * a VM-Exit from an enclave.
		 */
		if (!instr_len)
			goto rip_updated;

		WARN(exit_reason.enclave_mode,
		     "KVM: skipping instruction after SGX enclave VM-Exit");

		orig_rip = kvm_rip_read(vcpu);
		rip = orig_rip + instr_len;
#ifdef CONFIG_X86_64
		/*
		 * We need to mask out the high 32 bits of RIP if not in 64-bit
		 * mode, but just finding out that we are in 64-bit mode is
		 * quite expensive.  Only do it if there was a carry.
		 */
		if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
			rip = (u32)rip;
#endif
		kvm_rip_write(vcpu, rip);
	} else {
		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;
	}

rip_updated:
	/* skipping an emulated instruction also counts */
	vmx_set_interrupt_shadow(vcpu, 0);

	return 1;
}
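
/*
 * Worked example for the carry check above (hypothetical RIP): if
 * orig_rip = 0xffffffff and instr_len = 2, then rip = 0x100000001 and
 * (rip ^ orig_rip) >> 31 == 3, i.e. bit 31 changed due to a carry, so the
 * (expensive) is_64_bit_mode() lookup runs and RIP is truncated to 32 bits
 * outside 64-bit mode.  If no bit at or above position 31 flips, the
 * lookup is skipped entirely.
 */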
/*
 * Recognizes a pending MTF VM-exit and records the nested state for later
 * delivery.
 */
static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!is_guest_mode(vcpu))
		return;

	/*
	 * Per the SDM, MTF takes priority over debug-trap exceptions besides
	 * T-bit traps. As instruction emulation is completed (i.e. at the
	 * instruction boundary), any #DB exception pending delivery must be a
	 * debug-trap. Record the pending MTF state to be delivered in
	 * vmx_check_nested_events().
	 */
	if (nested_cpu_has_mtf(vmcs12) &&
	    (!vcpu->arch.exception.pending ||
	     vcpu->arch.exception.nr == DB_VECTOR))
		vmx->nested.mtf_pending = true;
	else
		vmx->nested.mtf_pending = false;
}
static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	vmx_update_emulated_instruction(vcpu);
	return skip_emulated_instruction(vcpu);
}
static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure that we clear the HLT state in the VMCS.  We don't need to
	 * explicitly skip the instruction because if the HLT state is set,
	 * then the instruction is already executing and RIP has already been
	 * advanced.
	 */
	if (kvm_hlt_in_guest(vcpu->kvm) &&
	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
}
static void vmx_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	u32 error_code = vcpu->arch.exception.error_code;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	kvm_deliver_exception_payload(vcpu);

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (kvm_exception_is_soft(nr))
			inc_eip = vcpu->arch.event_exit_inst_len;
		kvm_inject_realmode_interrupt(vcpu, nr, inc_eip);
		return;
	}

	WARN_ON_ONCE(vmx->emulation_required);

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);

	vmx_clear_hlt(vcpu);
}
static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
			       bool load_into_hardware)
{
	struct vmx_uret_msr *uret_msr;

	uret_msr = vmx_find_uret_msr(vmx, msr);
	if (!uret_msr)
		return;

	uret_msr->load_into_hardware = load_into_hardware;
}
/*
 * Configuring user return MSRs to automatically save, load, and restore MSRs
 * that need to be shoved into hardware when running the guest.  Note, omitting
 * an MSR here does _NOT_ mean it's not emulated, only that it will not be
 * loaded into hardware when running the guest.
 */
static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
{
#ifdef CONFIG_X86_64
	bool load_syscall_msrs;

	/*
	 * The SYSCALL MSRs are only needed on long mode guests, and only
	 * when EFER.SCE is set.
	 */
	load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
			    (vmx->vcpu.arch.efer & EFER_SCE);

	vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
	vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
	vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
#endif
	vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));

	vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
			   guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
			   guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));

	/*
	 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
	 * kernel and old userspace.  If those guests run on a tsx=off host, do
	 * allow guests to use TSX_CTRL, but don't change the value in hardware
	 * so that TSX remains always disabled.
	 */
	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));

	/*
	 * The set of MSRs to load may have changed, reload MSRs before the
	 * next VM-Enter.
	 */
	vmx->guest_uret_msrs_loaded = false;
}
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
		return vmcs12->tsc_offset;

	return 0;
}
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
		return vmcs12->tsc_multiplier;

	return kvm_default_tsc_scaling_ratio;
}
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	vmcs_write64(TSC_OFFSET, offset);
}

static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
{
	vmcs_write64(TSC_MULTIPLIER, multiplier);
}
/*
 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
 * all guests if the "nested" module option is off, and can also be disabled
 * for a single guest by disabling its VMX cpuid bit.
 */
bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
{
	return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
}
static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
						 uint64_t val)
{
	uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;

	return !(val & ~valid_bits);
}
static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested)
			return 1;
		return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
	case MSR_IA32_PERF_CAPABILITIES:
		msr->data = vmx_get_perf_capabilities();
		return 0;
	default:
		return KVM_MSR_RET_INVALID;
	}
}
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_uret_msr *msr;
	u32 index;

	switch (msr_info->index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		msr_info->data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		msr_info->data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_info);
	case MSR_IA32_TSX_CTRL:
		if (!msr_info->host_initiated &&
		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
			return 1;
		goto find_uret_msr;
	case MSR_IA32_UMWAIT_CONTROL:
		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
			return 1;

		msr_info->data = vmx->msr_ia32_umwait_control;
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		msr_info->data = to_vmx(vcpu)->spec_ctrl;
		break;
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported() ||
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
			return 1;
		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
		break;
	case MSR_IA32_MCG_EXT_CTL:
		if (!msr_info->host_initiated &&
		    !(vmx->msr_ia32_feature_control &
		      FEAT_CTL_LMCE_ENABLED))
			return 1;
		msr_info->data = vcpu->arch.mcg_ext_ctl;
		break;
	case MSR_IA32_FEAT_CTL:
		msr_info->data = vmx->msr_ia32_feature_control;
		break;
	case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
			return 1;
		msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
			[msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested_vmx_allowed(vcpu))
			return 1;
		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
				    &msr_info->data))
			return 1;
		/*
		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
		 * instead of just ignoring the features, different Hyper-V
		 * versions are either trying to use them and fail or do some
		 * sanity checking and refuse to boot. Filter all unsupported
		 * features out.
		 */
		if (!msr_info->host_initiated &&
		    vmx->nested.enlightened_vmcs_enabled)
			nested_evmcs_filter_control_msr(msr_info->index,
							&msr_info->data);
		break;
	case MSR_IA32_RTIT_CTL:
		if (!vmx_pt_mode_is_host_guest())
			return 1;
		msr_info->data = vmx->pt_desc.guest.ctl;
		break;
	case MSR_IA32_RTIT_STATUS:
		if (!vmx_pt_mode_is_host_guest())
			return 1;
		msr_info->data = vmx->pt_desc.guest.status;
		break;
	case MSR_IA32_RTIT_CR3_MATCH:
		if (!vmx_pt_mode_is_host_guest() ||
			!intel_pt_validate_cap(vmx->pt_desc.caps,
						PT_CAP_cr3_filtering))
			return 1;
		msr_info->data = vmx->pt_desc.guest.cr3_match;
		break;
	case MSR_IA32_RTIT_OUTPUT_BASE:
		if (!vmx_pt_mode_is_host_guest() ||
			(!intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_topa_output) &&
			 !intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_single_range_output)))
			return 1;
		msr_info->data = vmx->pt_desc.guest.output_base;
		break;
	case MSR_IA32_RTIT_OUTPUT_MASK:
		if (!vmx_pt_mode_is_host_guest() ||
			(!intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_topa_output) &&
			 !intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_single_range_output)))
			return 1;
		msr_info->data = vmx->pt_desc.guest.output_mask;
		break;
	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
		if (!vmx_pt_mode_is_host_guest() ||
			(index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_num_address_ranges)))
			return 1;
		if (index % 2)
			msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
		else
			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
		break;
	case MSR_IA32_DEBUGCTLMSR:
		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
		break;
	default:
	find_uret_msr:
		msr = vmx_find_uret_msr(vmx, msr_info->index);
		if (msr) {
			msr_info->data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_info);
	}

	return 0;
}
static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
					     u64 data)
{
#ifdef CONFIG_X86_64
	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return (u32)data;
#endif
	return (unsigned long)data;
}
static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu)
{
	u64 debugctl = vmx_supported_debugctl();

	if (!intel_pmu_lbr_is_enabled(vcpu))
		debugctl &= ~DEBUGCTLMSR_LBR_MASK;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
		debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;

	return debugctl;
}
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_uret_msr *msr;
	int ret = 0;
	u32 msr_index = msr_info->index;
	u64 data = msr_info->data;
	u32 index;

	switch (msr_index) {
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_write_guest_kernel_gs_base(vmx, data);
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		if (is_guest_mode(vcpu))
			get_vmcs12(vcpu)->guest_sysenter_cs = data;
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		if (is_guest_mode(vcpu)) {
			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
			get_vmcs12(vcpu)->guest_sysenter_eip = data;
		}
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		if (is_guest_mode(vcpu)) {
			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
			get_vmcs12(vcpu)->guest_sysenter_esp = data;
		}
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_DEBUGCTLMSR: {
		u64 invalid = data & ~vcpu_supported_debugctl(vcpu);
		if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
			if (report_ignored_msrs)
				vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n",
					    __func__, data);
			data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
			invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
		}

		if (invalid)
			return 1;

		if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
						VM_EXIT_SAVE_DEBUG_CONTROLS)
			get_vmcs12(vcpu)->guest_ia32_debugctl = data;

		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
		if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
		    (data & DEBUGCTLMSR_LBR))
			intel_pmu_create_guest_lbr_event(vcpu);
		return 0;
	}
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported() ||
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
			return 1;
		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
		    (data & MSR_IA32_BNDCFGS_RSVD))
			return 1;
		vmcs_write64(GUEST_BNDCFGS, data);
		break;
	case MSR_IA32_UMWAIT_CONTROL:
		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
			return 1;

		/* The reserved bit 1 and non-32 bit [63:32] should be zero */
		if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
			return 1;

		vmx->msr_ia32_umwait_control = data;
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		if (kvm_spec_ctrl_test_value(data))
			return 1;

		vmx->spec_ctrl = data;
		if (!data)
			break;

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_vmx_prepare_msr_bitmap. We should not touch the
		 * vmcs02.msr_bitmap here since it gets completely overwritten
		 * in the merging. We update the vmcs01 here for L1 as well
		 * since it will end up touching the MSR anyway now.
		 */
		vmx_disable_intercept_for_msr(vcpu,
					      MSR_IA32_SPEC_CTRL,
					      MSR_TYPE_RW);
		break;
	case MSR_IA32_TSX_CTRL:
		if (!msr_info->host_initiated &&
		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
			return 1;
		if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
			return 1;
		goto find_uret_msr;
	case MSR_IA32_PRED_CMD:
		if (!msr_info->host_initiated &&
		    !guest_has_pred_cmd_msr(vcpu))
			return 1;

		if (data & ~PRED_CMD_IBPB)
			return 1;
		if (!boot_cpu_has(X86_FEATURE_IBPB))
			return 1;
		if (!data)
			break;

		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_vmx_prepare_msr_bitmap. We should not touch the
		 * vmcs02.msr_bitmap here since it gets completely overwritten
		 * in the merging.
		 */
		vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W);
		break;
	case MSR_IA32_CR_PAT:
		if (!kvm_pat_valid(data))
			return 1;

		if (is_guest_mode(vcpu) &&
		    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
			get_vmcs12(vcpu)->guest_ia32_pat = data;

		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_IA32_TSC_ADJUST:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_IA32_MCG_EXT_CTL:
		if ((!msr_info->host_initiated &&
		     !(to_vmx(vcpu)->msr_ia32_feature_control &
		       FEAT_CTL_LMCE_ENABLED)) ||
		    (data & ~MCG_EXT_CTL_LMCE_EN))
			return 1;
		vcpu->arch.mcg_ext_ctl = data;
		break;
	case MSR_IA32_FEAT_CTL:
		if (!vmx_feature_control_msr_valid(vcpu, data) ||
		    (to_vmx(vcpu)->msr_ia32_feature_control &
		     FEAT_CTL_LOCKED && !msr_info->host_initiated))
			return 1;
		vmx->msr_ia32_feature_control = data;
		if (msr_info->host_initiated && data == 0)
			vmx_leave_nested(vcpu);

		/* SGX may be enabled/disabled by guest's firmware */
		vmx_write_encls_bitmap(vcpu, NULL);
		break;
	case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
		/*
		 * On real hardware, the LE hash MSRs are writable before
		 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX),
		 * at which point SGX related bits in IA32_FEATURE_CONTROL
		 * become writable.
		 *
		 * KVM does not emulate SGX activation for simplicity, so
		 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL
		 * is unlocked.  This is technically not architectural
		 * behavior, but it's close enough.
		 */
		if (!msr_info->host_initiated &&
		    (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) ||
		    ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
		    !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
			return 1;
		vmx->msr_ia32_sgxlepubkeyhash
			[msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!msr_info->host_initiated)
			return 1; /* they are read-only */
		if (!nested_vmx_allowed(vcpu))
			return 1;
		return vmx_set_vmx_msr(vcpu, msr_index, data);
	case MSR_IA32_RTIT_CTL:
		if (!vmx_pt_mode_is_host_guest() ||
			vmx_rtit_ctl_check(vcpu, data) ||
			vmx->nested.vmxon)
			return 1;
		vmcs_write64(GUEST_IA32_RTIT_CTL, data);
		vmx->pt_desc.guest.ctl = data;
		pt_update_intercept_for_msr(vcpu);
		break;
	case MSR_IA32_RTIT_STATUS:
		if (!pt_can_write_msr(vmx))
			return 1;
		if (data & MSR_IA32_RTIT_STATUS_MASK)
			return 1;
		vmx->pt_desc.guest.status = data;
		break;
	case MSR_IA32_RTIT_CR3_MATCH:
		if (!pt_can_write_msr(vmx))
			return 1;
		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
					   PT_CAP_cr3_filtering))
			return 1;
		vmx->pt_desc.guest.cr3_match = data;
		break;
	case MSR_IA32_RTIT_OUTPUT_BASE:
		if (!pt_can_write_msr(vmx))
			return 1;
		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
					   PT_CAP_topa_output) &&
		    !intel_pt_validate_cap(vmx->pt_desc.caps,
					   PT_CAP_single_range_output))
			return 1;
		if (!pt_output_base_valid(vcpu, data))
			return 1;
		vmx->pt_desc.guest.output_base = data;
		break;
	case MSR_IA32_RTIT_OUTPUT_MASK:
		if (!pt_can_write_msr(vmx))
			return 1;
		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
					   PT_CAP_topa_output) &&
		    !intel_pt_validate_cap(vmx->pt_desc.caps,
					   PT_CAP_single_range_output))
			return 1;
		vmx->pt_desc.guest.output_mask = data;
		break;
	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
... MSR_IA32_RTIT_ADDR3_B
:
2202 if (!pt_can_write_msr(vmx
))
2204 index
= msr_info
->index
- MSR_IA32_RTIT_ADDR0_A
;
2205 if (index
>= 2 * intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2206 PT_CAP_num_address_ranges
))
2208 if (is_noncanonical_address(data
, vcpu
))
2211 vmx
->pt_desc
.guest
.addr_b
[index
/ 2] = data
;
2213 vmx
->pt_desc
.guest
.addr_a
[index
/ 2] = data
;
2215 case MSR_IA32_PERF_CAPABILITIES
:
2216 if (data
&& !vcpu_to_pmu(vcpu
)->version
)
2218 if (data
& PMU_CAP_LBR_FMT
) {
2219 if ((data
& PMU_CAP_LBR_FMT
) !=
2220 (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT
))
2222 if (!intel_pmu_lbr_is_compatible(vcpu
))
2225 ret
= kvm_set_msr_common(vcpu
, msr_info
);
2230 msr
= vmx_find_uret_msr(vmx
, msr_index
);
2232 ret
= vmx_set_guest_uret_msr(vmx
, msr
, data
);
2234 ret
= kvm_set_msr_common(vcpu
, msr_info
);
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	unsigned long guest_owned_bits;

	kvm_register_mark_available(vcpu, reg);

	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	case VCPU_EXREG_PDPTR:
		if (enable_ept)
			ept_save_pdptrs(vcpu);
		break;
	case VCPU_EXREG_CR0:
		guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;

		vcpu->arch.cr0 &= ~guest_owned_bits;
		vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
		break;
	case VCPU_EXREG_CR3:
		/*
		 * When intercepting CR3 loads, e.g. for shadow paging, KVM's
		 * CR3 is loaded into hardware, not the guest's CR3.
		 */
		if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING))
			vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
		break;
	case VCPU_EXREG_CR4:
		guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;

		vcpu->arch.cr4 &= ~guest_owned_bits;
		vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
		break;
	default:
		KVM_BUG_ON(1, vcpu->kvm);
		break;
	}
}
static __init int cpu_has_kvm_support(void)
{
	return cpu_has_vmx();
}

static __init int vmx_disabled_by_bios(void)
{
	return !boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	       !boot_cpu_has(X86_FEATURE_VMX);
}

static int kvm_cpu_vmxon(u64 vmxon_pointer)
{
	u64 msr;

	cr4_set_bits(X86_CR4_VMXE);

	asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
			  _ASM_EXTABLE(1b, %l[fault])
			  : : [vmxon_pointer] "m"(vmxon_pointer)
			  : : fault);
	return 0;

fault:
	WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
		  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
	cr4_clear_bits(X86_CR4_VMXE);

	return -EFAULT;
}

static int hardware_enable(void)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	int r;

	if (cr4_read_shadow() & X86_CR4_VMXE)
		return -EBUSY;

	/*
	 * This can happen if we hot-added a CPU but failed to allocate
	 * VP assist page for it.
	 */
	if (static_branch_unlikely(&enable_evmcs) &&
	    !hv_get_vp_assist_page(cpu))
		return -EFAULT;

	intel_pt_handle_vmx(1);

	r = kvm_cpu_vmxon(phys_addr);
	if (r) {
		intel_pt_handle_vmx(0);
		return r;
	}

	if (enable_ept)
		ept_sync_global();

	return 0;
}

static void vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v, *n;

	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
				 loaded_vmcss_on_cpu_link)
		__loaded_vmcs_clear(v);
}

static void hardware_disable(void)
{
	vmclear_local_loaded_vmcss();

	if (cpu_vmxoff())
		kvm_spurious_fault();

	intel_pt_handle_vmx(0);
}
/*
 * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID
 * directly instead of going through cpu_has(), to ensure KVM is trapping
 * ENCLS whenever it's supported in hardware.  It does not matter whether
 * the host OS supports or has enabled SGX.
 */
static bool cpu_has_sgx(void)
{
	return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
}
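/*
 * Worked example for the CPUID probe above (illustrative leaf values,
 * not from any specific part): on an SGX-capable CPU,
 *
 *	cpuid_eax(0)    = 0x1b	-> max basic leaf covers 0x12
 *	cpuid_eax(0x12) = 0x3	-> EAX bit 0 (SGX1) and bit 1 (SGX2) set
 *
 * so cpu_has_sgx() returns true and ENCLS exiting is offered as an
 * optional secondary control in setup_vmcs_config() below.
 */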
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}
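/*
 * Worked example of the adjustment above (hypothetical register
 * values): with ctl_min = 0x0008, ctl_opt = 0x0100, and an MSR whose
 * low word (must-be-one bits) is 0x000a and high word (may-be-one
 * bits) is 0x010e:
 *
 *	ctl  = 0x0108;		// min | opt
 *	ctl &= 0x010e;		// -> 0x0108, both requested bits allowed
 *	ctl |= 0x000a;		// -> 0x010a, bit 1 forced on by hardware
 *
 * ctl_min & ~ctl == 0, so the required bits survived and *result is
 * set to 0x010a.
 */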
static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
				    struct vmx_capability *vmx_cap)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt, min2, opt2;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	memset(vmcs_conf, 0, sizeof(*vmcs_conf));
	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_CR3_LOAD_EXITING |
	      CPU_BASED_CR3_STORE_EXITING |
	      CPU_BASED_UNCOND_IO_EXITING |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETTING |
	      CPU_BASED_MWAIT_EXITING |
	      CPU_BASED_MONITOR_EXITING |
	      CPU_BASED_INVLPG_EXITING |
	      CPU_BASED_RDPMC_EXITING;

	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_USE_MSR_BITMAPS |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min2 = 0;
		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID |
			SECONDARY_EXEC_ENABLE_EPT |
			SECONDARY_EXEC_UNRESTRICTED_GUEST |
			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
			SECONDARY_EXEC_DESC |
			SECONDARY_EXEC_ENABLE_RDTSCP |
			SECONDARY_EXEC_ENABLE_INVPCID |
			SECONDARY_EXEC_APIC_REGISTER_VIRT |
			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
			SECONDARY_EXEC_SHADOW_VMCS |
			SECONDARY_EXEC_XSAVES |
			SECONDARY_EXEC_RDSEED_EXITING |
			SECONDARY_EXEC_RDRAND_EXITING |
			SECONDARY_EXEC_ENABLE_PML |
			SECONDARY_EXEC_TSC_SCALING |
			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
			SECONDARY_EXEC_PT_USE_GPA |
			SECONDARY_EXEC_PT_CONCEAL_VMX |
			SECONDARY_EXEC_ENABLE_VMFUNC |
			SECONDARY_EXEC_BUS_LOCK_DETECTION;
		if (cpu_has_sgx())
			opt2 |= SECONDARY_EXEC_ENCLS_EXITING;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_2nd_exec_control &= ~(
				SECONDARY_EXEC_APIC_REGISTER_VIRT |
				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);

	rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
		&vmx_cap->ept, &vmx_cap->vpid);

	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when EPT
		   enabled */
		_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
					     CPU_BASED_CR3_STORE_EXITING |
					     CPU_BASED_INVLPG_EXITING);
	} else if (vmx_cap->ept) {
		vmx_cap->ept = 0;
		pr_warn_once("EPT CAP should not exist if not support "
				"1-setting enable EPT VM-execution control\n");
	}
	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
		vmx_cap->vpid) {
		vmx_cap->vpid = 0;
		pr_warn_once("VPID CAP should not exist if not support "
				"1-setting enable VPID VM-execution control\n");
	}

	min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
	      VM_EXIT_LOAD_IA32_PAT |
	      VM_EXIT_LOAD_IA32_EFER |
	      VM_EXIT_CLEAR_BNDCFGS |
	      VM_EXIT_PT_CONCEAL_PIP |
	      VM_EXIT_CLEAR_IA32_RTIT_CTL;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
		 PIN_BASED_VMX_PREEMPTION_TIMER;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	if (cpu_has_broken_vmx_preemption_timer())
		_pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
	if (!(_cpu_based_2nd_exec_control &
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;

	min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
	opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
	      VM_ENTRY_LOAD_IA32_PAT |
	      VM_ENTRY_LOAD_IA32_EFER |
	      VM_ENTRY_LOAD_BNDCFGS |
	      VM_ENTRY_PT_CONCEAL_PIP |
	      VM_ENTRY_LOAD_IA32_RTIT_CTL;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	/*
	 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
	 * can't be used due to an errata where VM Exit may incorrectly clear
	 * IA32_PERF_GLOBAL_CTRL[34:32].  Workaround the errata by using the
	 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
	 */
	if (boot_cpu_data.x86 == 0x6) {
		switch (boot_cpu_data.x86_model) {
		case 26: /* AAK155 */
		case 30: /* AAP115 */
		case 37: /* AAT100 */
		case 44: /* BC86,AAY89,BD102 */
		case 46: /* BA97 */
			_vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
			_vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
			pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
					"does not work properly. Using workaround\n");
			break;
		default:
			break;
		}
	}

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_conf->size);
	vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;

	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl         = _vmexit_control;
	vmcs_conf->vmentry_ctrl        = _vmentry_control;

#if IS_ENABLED(CONFIG_HYPERV)
	if (enlightened_vmcs)
		evmcs_sanitize_exec_ctrls(vmcs_conf);
#endif

	return 0;
}
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = __alloc_pages_node(node, flags, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);

	/* KVM supports Enlightened VMCS v1 only */
	if (static_branch_unlikely(&enable_evmcs))
		vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
	else
		vmcs->hdr.revision_id = vmcs_config.revision_id;

	if (shadow)
		vmcs->hdr.shadow_vmcs = 1;
	return vmcs;
}

void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

/*
 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
 */
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	if (!loaded_vmcs->vmcs)
		return;
	loaded_vmcs_clear(loaded_vmcs);
	free_vmcs(loaded_vmcs->vmcs);
	loaded_vmcs->vmcs = NULL;
	if (loaded_vmcs->msr_bitmap)
		free_page((unsigned long)loaded_vmcs->msr_bitmap);
	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
}

int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	loaded_vmcs->vmcs = alloc_vmcs(false);
	if (!loaded_vmcs->vmcs)
		return -ENOMEM;

	vmcs_clear(loaded_vmcs->vmcs);

	loaded_vmcs->shadow_vmcs = NULL;
	loaded_vmcs->hv_timer_soft_disabled = false;
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;

	if (cpu_has_vmx_msr_bitmap()) {
		loaded_vmcs->msr_bitmap = (unsigned long *)
				__get_free_page(GFP_KERNEL_ACCOUNT);
		if (!loaded_vmcs->msr_bitmap)
			goto out_vmcs;
		memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);

		if (IS_ENABLED(CONFIG_HYPERV) &&
		    static_branch_unlikely(&enable_evmcs) &&
		    (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
			struct hv_enlightened_vmcs *evmcs =
				(struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;

			evmcs->hv_enlightenments_control.msr_bitmap = 1;
		}
	}

	memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
	memset(&loaded_vmcs->controls_shadow, 0,
		sizeof(struct vmcs_controls_shadow));

	return 0;

out_vmcs:
	free_loaded_vmcs(loaded_vmcs);
	return -ENOMEM;
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		free_vmcs(per_cpu(vmxarea, cpu));
		per_cpu(vmxarea, cpu) = NULL;
	}
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		/*
		 * When eVMCS is enabled, alloc_vmcs_cpu() sets
		 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
		 * revision_id reported by MSR_IA32_VMX_BASIC.
		 *
		 * However, even though not explicitly documented by
		 * TLFS, VMXArea passed as VMXON argument should
		 * still be marked with revision_id reported by
		 * physical CPU.
		 */
		if (static_branch_unlikely(&enable_evmcs))
			vmcs->hdr.revision_id = vmcs_config.revision_id;

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}
static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
			struct kvm_segment *save)
{
	if (!emulate_invalid_guest_state) {
		/*
		 * CS and SS RPL should be equal during guest entry according
		 * to VMX spec, but in reality it is not always so. Since vcpu
		 * is in the middle of the transition from real mode to
		 * protected mode it is safe to assume that RPL 0 is a good
		 * default value.
		 */
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
			save->selector &= ~SEGMENT_RPL_MASK;
		save->dpl = save->selector & SEGMENT_RPL_MASK;
		save->s = 1;
	}
	__vmx_set_segment(vcpu, save, seg);
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Update real mode segment cache. It may be not up-to-date if segment
	 * register was written while vcpu was in a guest mode.
	 */
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);

	vmx->rmode.vm86_active = 0;

	__vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	vmx_update_exception_bitmap(vcpu);

	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
	fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
	fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
	fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
	fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
}
static void fix_rmode_seg(int seg, struct kvm_segment *save)
{
	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	struct kvm_segment var = *save;

	var.dpl = 0x3;
	if (seg == VCPU_SREG_CS)
		var.type = 0x3;

	if (!emulate_invalid_guest_state) {
		var.selector = var.base >> 4;
		var.base = var.base & 0xffff0;
		var.limit = 0xffff;
		var.g = 0;
		var.db = 0;
		var.present = 1;
		var.s = 1;
		var.l = 0;
		var.unusable = 0;
		var.type = 0x3;
		var.avl = 0;
		if (save->base & 0xf)
			printk_once(KERN_WARNING "kvm: segment base is not "
					"paragraph aligned when entering "
					"protected mode (seg=%d)", seg);
	}

	vmcs_write16(sf->selector, var.selector);
	vmcs_writel(sf->base, var.base);
	vmcs_write32(sf->limit, var.limit);
	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
}

static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);

	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);

	vmx->rmode.vm86_active = 1;

	/*
	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
	 * vcpu. Warn the user that an update is overdue.
	 */
	if (!kvm_vmx->tss_addr)
		printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be "
			     "called before entering vcpu\n");

	vmx_segment_cache_clear(vmx);

	vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vmx->rmode.save_rflags = flags;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	vmx_update_exception_bitmap(vcpu);

	fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
	fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
}
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);

	/* Nothing to do if hardware doesn't support EFER. */
	if (!msr)
		return 0;

	vcpu->arch.efer = efer;
	if (efer & EFER_LMA) {
		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
		msr->data = efer;
	} else {
		vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;
	}

	vmx_setup_uret_msrs(vmx);
	return 0;
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	vmx_segment_cache_clear(to_vmx(vcpu));

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
		pr_debug_ratelimited("%s: tss fixup for long mode. \n",
				     __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~VMX_AR_TYPE_MASK)
			     | VMX_AR_TYPE_BUSY_64_TSS);
	}
	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}

#endif
static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
	 * the CPU is not required to invalidate guest-physical mappings on
	 * VM-Entry, even if VPID is disabled.  Guest-physical mappings are
	 * associated with the root EPT structure and not any particular VPID
	 * (INVVPID also isn't required to invalidate guest-physical mappings).
	 */
	if (enable_ept) {
		ept_sync_global();
	} else if (enable_vpid) {
		if (cpu_has_vmx_invvpid_global()) {
			vpid_sync_vcpu_global();
		} else {
			vpid_sync_vcpu_single(vmx->vpid);
			vpid_sync_vcpu_single(vmx->nested.vpid02);
		}
	}
}

static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	u64 root_hpa = mmu->root_hpa;

	/* No flush required if the current context is invalid. */
	if (!VALID_PAGE(root_hpa))
		return;

	if (enable_ept)
		ept_sync_context(construct_eptp(vcpu, root_hpa,
						mmu->shadow_root_level));
	else if (!is_guest_mode(vcpu))
		vpid_sync_context(to_vmx(vcpu)->vpid);
	else
		vpid_sync_context(nested_get_vpid02(vcpu));
}

static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
{
	/*
	 * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
	 * vmx_flush_tlb_guest() for an explanation of why this is ok.
	 */
	vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
}

static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
	/*
	 * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
	 * or a vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit
	 * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
	 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
	 * i.e. no explicit INVVPID is necessary.
	 */
	vpid_sync_context(to_vmx(vcpu)->vpid);
}
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
		return;

	if (is_pae_paging(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
	}
}

void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
		return;

	mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
	mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
	mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
	mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);

	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
}
#define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
			  CPU_BASED_CR3_STORE_EXITING)

void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long hw_cr0, old_cr0_pg;
	u32 tmp;

	old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);

	hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
	if (is_unrestricted_guest(vcpu))
		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
	else {
		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
		if (!enable_ept)
			hw_cr0 |= X86_CR0_WP;

		if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
			enter_pmode(vcpu);

		if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
			enter_rmode(vcpu);
	}

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0, hw_cr0);
	vcpu->arch.cr0 = cr0;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!old_cr0_pg && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		else if (old_cr0_pg && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	if (enable_ept && !is_unrestricted_guest(vcpu)) {
		/*
		 * Ensure KVM has an up-to-date snapshot of the guest's CR3.  If
		 * the below code _enables_ CR3 exiting, vmx_cache_reg() will
		 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks
		 * KVM's CR3 is installed.
		 */
		if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
			vmx_cache_reg(vcpu, VCPU_EXREG_CR3);

		/*
		 * When running with EPT but not unrestricted guest, KVM must
		 * intercept CR3 accesses when paging is _disabled_.  This is
		 * necessary because restricted guests can't actually run with
		 * paging disabled, and so KVM stuffs its own CR3 in order to
		 * run the guest when using identity mapped page tables.
		 *
		 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the
		 * update, it may be stale with respect to CR3 interception,
		 * e.g. after nested VM-Enter.
		 *
		 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
		 * stores to forward them to L1, even if KVM does not need to
		 * intercept them to preserve its identity mapped page tables.
		 */
		if (!(cr0 & X86_CR0_PG)) {
			exec_controls_setbit(vmx, CR3_EXITING_BITS);
		} else if (!is_guest_mode(vcpu)) {
			exec_controls_clearbit(vmx, CR3_EXITING_BITS);
		} else {
			tmp = exec_controls_get(vmx);
			tmp &= ~CR3_EXITING_BITS;
			tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
			exec_controls_set(vmx, tmp);
		}

		/* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
		if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
			vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
	}

	/* depends on vcpu->arch.cr0 to be set to a new value */
	vmx->emulation_required = vmx_emulation_required(vcpu);
}
static int vmx_get_max_tdp_level(void)
{
	if (cpu_has_vmx_ept_5levels())
		return 5;
	return 4;
}

u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
{
	u64 eptp = VMX_EPTP_MT_WB;

	eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;

	if (enable_ept_ad_bits &&
	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
		eptp |= VMX_EPTP_AD_ENABLE_BIT;
	eptp |= root_hpa;

	return eptp;
}
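/*
 * Worked example of the EPTP layout built above (hypothetical
 * root_hpa = 0x1234000, 4-level walk, A/D bits on):
 *
 *	VMX_EPTP_MT_WB		bits 2:0 = 6	(write-back)
 *	VMX_EPTP_PWL_4		bits 5:3 = 3	(page-walk length - 1)
 *	VMX_EPTP_AD_ENABLE_BIT	bit  6	 = 1
 *	root_hpa		bits 51:12
 *
 * giving eptp = 0x123405e.
 */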
static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level)
{
	struct kvm *kvm = vcpu->kvm;
	bool update_guest_cr3 = true;
	unsigned long guest_cr3;
	u64 eptp;

	if (enable_ept) {
		eptp = construct_eptp(vcpu, root_hpa, root_level);
		vmcs_write64(EPT_POINTER, eptp);

		hv_track_root_tdp(vcpu, root_hpa);

		if (!enable_unrestricted_guest && !is_paging(vcpu))
			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
		else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
			guest_cr3 = vcpu->arch.cr3;
		else /* vmcs01.GUEST_CR3 is already up-to-date. */
			update_guest_cr3 = false;
		vmx_ept_load_pdptrs(vcpu);
	} else {
		guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu);
	}

	if (update_guest_cr3)
		vmcs_writel(GUEST_CR3, guest_cr3);
}
static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	/*
	 * We operate under the default treatment of SMM, so VMX cannot be
	 * enabled under SMM.  Note, whether or not VMXE is allowed at all is
	 * handled by kvm_is_valid_cr4().
	 */
	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
		return false;

	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
		return false;

	return true;
}

void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = vcpu->arch.cr4;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	/*
	 * Pass through host's Machine Check Enable value to hw_cr4, which
	 * is in force while we are in guest mode.  Do not let guests control
	 * this bit, even if host CR4.MCE == 0.
	 */
	unsigned long hw_cr4;

	hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
	if (is_unrestricted_guest(vcpu))
		hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
	else if (vmx->rmode.vm86_active)
		hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
	else
		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;

	if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
		if (cr4 & X86_CR4_UMIP) {
			secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
			hw_cr4 &= ~X86_CR4_UMIP;
		} else if (!is_guest_mode(vcpu) ||
			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
			secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
		}
	}

	vcpu->arch.cr4 = cr4;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);

	if (!is_unrestricted_guest(vcpu)) {
		if (enable_ept) {
			if (!is_paging(vcpu)) {
				hw_cr4 &= ~X86_CR4_PAE;
				hw_cr4 |= X86_CR4_PSE;
			} else if (!(cr4 & X86_CR4_PAE)) {
				hw_cr4 &= ~X86_CR4_PAE;
			}
		}

		/*
		 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in
		 * hardware.  To emulate this behavior, SMEP/SMAP/PKU needs
		 * to be manually disabled when guest switches to non-paging
		 * mode.
		 *
		 * If !enable_unrestricted_guest, the CPU is always running
		 * with CR0.PG=1 and CR4 needs to be modified.
		 * If enable_unrestricted_guest, the CPU automatically
		 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
		 */
		if (!is_paging(vcpu))
			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
	}

	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, hw_cr4);

	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
		kvm_update_cpuid_runtime(vcpu);
}
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 ar;

	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
		*var = vmx->rmode.segs[seg];
		if (seg == VCPU_SREG_TR
		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
			return;
		var->base = vmx_read_guest_seg_base(vmx, seg);
		var->selector = vmx_read_guest_seg_selector(vmx, seg);
		return;
	}

	var->base = vmx_read_guest_seg_base(vmx, seg);
	var->limit = vmx_read_guest_seg_limit(vmx, seg);
	var->selector = vmx_read_guest_seg_selector(vmx, seg);
	ar = vmx_read_guest_seg_ar(vmx, seg);
	var->unusable = (ar >> 16) & 1;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	/*
	 * Some userspaces do not preserve unusable property. Since usable
	 * segment has to be present according to VMX spec we can use present
	 * property to amend userspace bug by making unusable segment always
	 * nonpresent. vmx_segment_access_rights() already marks nonpresent
	 * segment as unusable.
	 */
	var->present = !var->unusable;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
}
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment s;

	if (to_vmx(vcpu)->rmode.vm86_active) {
		vmx_get_segment(vcpu, &s, seg);
		return s.base;
	}
	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
}

int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (unlikely(vmx->rmode.vm86_active))
		return 0;
	else {
		int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
		return VMX_AR_DPL(ar);
	}
}
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
	u32 ar;

	if (var->unusable || !var->present)
		ar = 1 << 16;
	else {
		ar = var->type & 15;
		ar |= (var->s & 1) << 4;
		ar |= (var->dpl & 3) << 5;
		ar |= (var->present & 1) << 7;
		ar |= (var->avl & 1) << 12;
		ar |= (var->l & 1) << 13;
		ar |= (var->db & 1) << 14;
		ar |= (var->g & 1) << 15;
	}

	return ar;
}
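/*
 * Worked example of the packing above: a flat, present, ring-0 code
 * segment (type = 0xb, s = 1, dpl = 0, present = 1, avl = 0, l = 0,
 * db = 1, g = 1) yields
 *
 *	ar = 0xb | (1 << 4) | (1 << 7) | (1 << 14) | (1 << 15) = 0xc09b
 *
 * the access-rights value commonly seen for a 4 GiB flat CS.
 */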
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	vmx_segment_cache_clear(vmx);

	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
		vmx->rmode.segs[seg] = *var;
		if (seg == VCPU_SREG_TR)
			vmcs_write16(sf->selector, var->selector);
		else if (var->s)
			fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
		return;
	}

	vmcs_writel(sf->base, var->base);
	vmcs_write32(sf->limit, var->limit);
	vmcs_write16(sf->selector, var->selector);

	/*
	 *   Fix the "Accessed" bit in AR field of segment registers for older
	 * qemu binaries.
	 *   IA32 arch specifies that at the time of processor reset the
	 * "Accessed" bit in the AR field of segment registers is 1. And qemu
	 * is setting it to 0 in the userland code. This causes invalid guest
	 * state vmexit when "unrestricted guest" mode is turned on.
	 *    Fix for this setup issue in cpu_reset is being pushed in the qemu
	 * tree. Newer qemu binaries with that qemu fix would not need this
	 * kvm hack.
	 */
	if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR))
		var->type |= 0x1; /* Accessed */

	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
}

static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
{
	__vmx_set_segment(vcpu, var, seg);

	to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_IDTR_BASE, dt->address);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_GDTR_BASE, dt->address);
}
static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment var;
	u32 ar;

	vmx_get_segment(vcpu, &var, seg);
	var.dpl = 0x3;
	if (seg == VCPU_SREG_CS)
		var.type = 0x3;
	ar = vmx_segment_access_rights(&var);

	if (var.base != (var.selector << 4))
		return false;
	if (var.limit != 0xffff)
		return false;
	if (ar != 0xf3)
		return false;

	return true;
}

static bool code_segment_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs;
	unsigned int cs_rpl;

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs_rpl = cs.selector & SEGMENT_RPL_MASK;

	if (cs.unusable)
		return false;
	if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
		return false;
	if (!cs.s)
		return false;
	if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
		if (cs.dpl > cs_rpl)
			return false;
	} else {
		if (cs.dpl != cs_rpl)
			return false;
	}
	if (!cs.present)
		return false;

	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
	return true;
}

static bool stack_segment_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment ss;
	unsigned int ss_rpl;

	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
	ss_rpl = ss.selector & SEGMENT_RPL_MASK;

	if (ss.unusable)
		return true;
	if (ss.type != 3 && ss.type != 7)
		return false;
	if (!ss.s)
		return false;
	if (ss.dpl != ss_rpl) /* DPL != RPL */
		return false;
	if (!ss.present)
		return false;

	return true;
}

static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment var;
	unsigned int rpl;

	vmx_get_segment(vcpu, &var, seg);
	rpl = var.selector & SEGMENT_RPL_MASK;

	if (var.unusable)
		return true;
	if (!var.s)
		return false;
	if (!var.present)
		return false;
	if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
		if (var.dpl < rpl) /* DPL < RPL */
			return false;
	}

	/* TODO: Add other members to kvm_segment_field to allow checking for other access
	 * rights flags
	 */
	return true;
}

static bool tr_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment tr;

	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);

	if (tr.unusable)
		return false;
	if (tr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
		return false;
	if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
		return false;
	if (!tr.present)
		return false;

	return true;
}

static bool ldtr_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment ldtr;

	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);

	if (ldtr.unusable)
		return true;
	if (ldtr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
		return false;
	if (ldtr.type != 2)
		return false;
	if (!ldtr.present)
		return false;

	return true;
}

static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ss;

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);

	return ((cs.selector & SEGMENT_RPL_MASK) ==
		 (ss.selector & SEGMENT_RPL_MASK));
}
/*
 * Check if guest state is valid. Returns true if valid, false if
 * not.
 * We assume that registers are always usable
 */
bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	/* real mode guest state checks */
	if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
			return false;
	} else {
	/* protected mode guest state checks */
		if (!cs_ss_rpl_check(vcpu))
			return false;
		if (!code_segment_valid(vcpu))
			return false;
		if (!stack_segment_valid(vcpu))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
			return false;
		if (!tr_valid(vcpu))
			return false;
		if (!ldtr_valid(vcpu))
			return false;
	}
	/* TODO:
	 * - Add checks on RIP
	 * - Add checks on RFLAGS
	 */

	return true;
}
static int init_rmode_tss(struct kvm *kvm, void __user *ua)
{
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
	u16 data;
	int i;

	for (i = 0; i < 3; i++) {
		if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))
			return -EFAULT;
	}

	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16)))
		return -EFAULT;

	data = ~0;
	if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8)))
		return -EFAULT;

	return 0;
}

static int init_rmode_identity_map(struct kvm *kvm)
{
	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
	int i, r = 0;
	void __user *uaddr;
	u32 tmp;

	/* Protect kvm_vmx->ept_identity_pagetable_done. */
	mutex_lock(&kvm->slots_lock);

	if (likely(kvm_vmx->ept_identity_pagetable_done))
		goto out;

	if (!kvm_vmx->ept_identity_map_addr)
		kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;

	uaddr = __x86_set_memory_region(kvm,
					IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
					kvm_vmx->ept_identity_map_addr,
					PAGE_SIZE);
	if (IS_ERR(uaddr)) {
		r = PTR_ERR(uaddr);
		goto out;
	}

	/* Set up identity-mapping pagetable for EPT in real mode */
	for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
		tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
		if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) {
			r = -EFAULT;
			goto out;
		}
	}
	kvm_vmx->ept_identity_pagetable_done = true;

out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static void seg_setup(int seg)
{
	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	unsigned int ar;

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	ar = 0x93;
	if (seg == VCPU_SREG_CS)
		ar |= 0x08; /* code segment */

	vmcs_write32(sf->ar_bytes, ar);
}

static int alloc_apic_access_page(struct kvm *kvm)
{
	struct page *page;
	void __user *hva;
	int ret = 0;

	mutex_lock(&kvm->slots_lock);
	if (kvm->arch.apic_access_memslot_enabled)
		goto out;
	hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				      APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
	if (IS_ERR(hva)) {
		ret = PTR_ERR(hva);
		goto out;
	}

	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (is_error_page(page)) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Do not pin the page in memory, so that memory hot-unplug
	 * is able to migrate it.
	 */
	put_page(page);
	kvm->arch.apic_access_memslot_enabled = true;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}
int allocate_vpid(void)
{
	int vpid;

	if (!enable_vpid)
		return 0;
	spin_lock(&vmx_vpid_lock);
	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
	if (vpid < VMX_NR_VPIDS)
		__set_bit(vpid, vmx_vpid_bitmap);
	else
		vpid = 0;
	spin_unlock(&vmx_vpid_lock);
	return vpid;
}

void free_vpid(int vpid)
{
	if (!enable_vpid || vpid == 0)
		return;
	spin_lock(&vmx_vpid_lock);
	__clear_bit(vpid, vmx_vpid_bitmap);
	spin_unlock(&vmx_vpid_lock);
}
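/*
 * Usage sketch (illustrative): callers pair these at vCPU create and
 * destroy, and VPID 0 is reserved to mean "no VPID":
 *
 *	vmx->vpid = allocate_vpid();	// 0 if disabled or exhausted
 *	...
 *	free_vpid(vmx->vpid);		// no-op for vpid == 0
 */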
static void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		__clear_bit(msr, msr_bitmap + 0x000 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		__clear_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
}

static void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		__clear_bit(msr, msr_bitmap + 0x800 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		__clear_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
}

static void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		__set_bit(msr, msr_bitmap + 0x000 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		__set_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
}

static void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		__set_bit(msr, msr_bitmap + 0x800 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		__set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
}
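/*
 * Layout handled by the four helpers above, per the SDM: the 4 KiB MSR
 * bitmap is four 1024-byte regions, one bit per MSR:
 *
 *	0x000: read  intercepts, MSRs 0x00000000 - 0x00001fff
 *	0x400: read  intercepts, MSRs 0xc0000000 - 0xc0001fff
 *	0x800: write intercepts, MSRs 0x00000000 - 0x00001fff
 *	0xc00: write intercepts, MSRs 0xc0000000 - 0xc0001fff
 *
 * e.g. clearing the read intercept for MSR_STAR (0xc0000081) clears
 * bit 0x81 of the region at offset 0x400.
 */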
void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;

	if (!cpu_has_vmx_msr_bitmap())
		return;

	if (static_branch_unlikely(&enable_evmcs))
		evmcs_touch_msr_bitmap();

	/*
	 * Mark the desired intercept state in shadow bitmap, this is needed
	 * for resync when the MSR filters change.
	 */
	if (is_valid_passthrough_msr(msr)) {
		int idx = possible_passthrough_msr_slot(msr);

		if (idx != -ENOENT) {
			if (type & MSR_TYPE_R)
				clear_bit(idx, vmx->shadow_msr_intercept.read);
			if (type & MSR_TYPE_W)
				clear_bit(idx, vmx->shadow_msr_intercept.write);
		}
	}

	if ((type & MSR_TYPE_R) &&
	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
		vmx_set_msr_bitmap_read(msr_bitmap, msr);
		type &= ~MSR_TYPE_R;
	}

	if ((type & MSR_TYPE_W) &&
	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
		vmx_set_msr_bitmap_write(msr_bitmap, msr);
		type &= ~MSR_TYPE_W;
	}

	if (type & MSR_TYPE_R)
		vmx_clear_msr_bitmap_read(msr_bitmap, msr);

	if (type & MSR_TYPE_W)
		vmx_clear_msr_bitmap_write(msr_bitmap, msr);
}

void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;

	if (!cpu_has_vmx_msr_bitmap())
		return;

	if (static_branch_unlikely(&enable_evmcs))
		evmcs_touch_msr_bitmap();

	/*
	 * Mark the desired intercept state in shadow bitmap, this is needed
	 * for resync when the MSR filter changes.
	 */
	if (is_valid_passthrough_msr(msr)) {
		int idx = possible_passthrough_msr_slot(msr);

		if (idx != -ENOENT) {
			if (type & MSR_TYPE_R)
				set_bit(idx, vmx->shadow_msr_intercept.read);
			if (type & MSR_TYPE_W)
				set_bit(idx, vmx->shadow_msr_intercept.write);
		}
	}

	if (type & MSR_TYPE_R)
		vmx_set_msr_bitmap_read(msr_bitmap, msr);

	if (type & MSR_TYPE_W)
		vmx_set_msr_bitmap_write(msr_bitmap, msr);
}
static void vmx_reset_x2apic_msrs(struct kvm_vcpu *vcpu, u8 mode)
{
	unsigned long *msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
	unsigned long read_intercept;
	int msr;

	read_intercept = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned int read_idx = msr / BITS_PER_LONG;
		unsigned int write_idx = read_idx + (0x800 / sizeof(long));

		msr_bitmap[read_idx] = read_intercept;
		msr_bitmap[write_idx] = ~0ul;
	}
}

static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u8 mode;

	if (!cpu_has_vmx_msr_bitmap())
		return;

	if (cpu_has_secondary_exec_ctrls() &&
	    (secondary_exec_controls_get(vmx) &
	     SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
		mode = MSR_BITMAP_MODE_X2APIC;
		if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
	} else {
		mode = 0;
	}

	if (mode == vmx->x2apic_msr_bitmap_mode)
		return;

	vmx->x2apic_msr_bitmap_mode = mode;

	vmx_reset_x2apic_msrs(vcpu, mode);

	/*
	 * TPR reads and writes can be virtualized even if virtual interrupt
	 * delivery is not in use.
	 */
	vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW,
				  !(mode & MSR_BITMAP_MODE_X2APIC));

	if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
		vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
	}
}
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
	u32 i;

	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
	for (i = 0; i < vmx->pt_desc.addr_range; i++) {
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
	}
}
static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	void *vapic_page;
	u32 vppr;
	int rvi;

	if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
		!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
		WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
		return false;

	rvi = vmx_get_rvi();

	vapic_page = vmx->nested.virtual_apic_map.hva;
	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));

	return ((rvi & 0xf0) > (vppr & 0xf0));
}
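/*
 * Worked example of the priority check above (hypothetical values):
 * rvi = 0x51 and vppr = 0x40 compare as 0x50 > 0x40, so a virtual
 * interrupt is deliverable and the function returns true; with
 * vppr = 0x50 it returns false, as an equal priority class is masked.
 */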
static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 i;

	/*
	 * Set intercept permissions for all potentially passed through MSRs
	 * again. They will automatically get filtered through the MSR filter,
	 * so we are back in sync after this.
	 */
	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
		u32 msr = vmx_possible_passthrough_msrs[i];
		bool read = test_bit(i, vmx->shadow_msr_intercept.read);
		bool write = test_bit(i, vmx->shadow_msr_intercept.write);

		vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_R, read);
		vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_W, write);
	}

	pt_update_intercept_for_msr(vcpu);
}
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
						     bool nested)
{
#ifdef CONFIG_SMP
	int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;

	if (vcpu->mode == IN_GUEST_MODE) {
		/*
		 * The vector of interrupt to be delivered to vcpu had
		 * been set in PIR before this function.
		 *
		 * Following cases will be reached in this block, and
		 * we always send a notification event in all cases as
		 * explained below.
		 *
		 * Case 1: vcpu keeps in non-root mode. Sending a
		 * notification event posts the interrupt to vcpu.
		 *
		 * Case 2: vcpu exits to root mode and is still
		 * runnable. PIR will be synced to vIRR before the
		 * next vcpu entry. Sending a notification event in
		 * this case has no effect, as vcpu is not in root
		 * mode.
		 *
		 * Case 3: vcpu exits to root mode and is blocked.
		 * vcpu_block() has already synced PIR to vIRR and
		 * never blocks vcpu if vIRR is not cleared. Therefore,
		 * a blocked vcpu here does not wait for any requested
		 * interrupts in PIR, and sending a notification event
		 * which has no effect is safe here.
		 */

		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
		return true;
	}
#endif
	return false;
}

static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
						int vector)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (is_guest_mode(vcpu) &&
	    vector == vmx->nested.posted_intr_nv) {
		/*
		 * If a posted intr is not recognized by hardware,
		 * we will accomplish it in the next vmentry.
		 */
		vmx->nested.pi_pending = true;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		/* the PIR and ON have been set by L1. */
		if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
			kvm_vcpu_kick(vcpu);
		return 0;
	}
	return -1;
}

/*
 * Send interrupt to vcpu via posted interrupt way.
 * 1. If target vcpu is running(non-root mode), send posted interrupt
 * notification to vcpu and hardware will sync PIR to vIRR atomically.
 * 2. If target vcpu isn't running(root mode), kick it to pick up the
 * interrupt from PIR in next vmentry.
 */
static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int r;

	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
	if (!r)
		return 0;

	if (!vcpu->arch.apicv_active)
		return -1;

	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
		return 0;

	/* If a previous notification has sent the IPI, nothing to do. */
	if (pi_test_and_set_on(&vmx->pi_desc))
		return 0;

	if (vcpu != kvm_get_running_vcpu() &&
	    !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
		kvm_vcpu_kick(vcpu);

	return 0;
}
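/*
 * Illustrative flow for a vector posted to a running vCPU (values
 * hypothetical): pi_test_and_set_pir(0x61, ...) sets bit 0x61 in the
 * PIR, pi_test_and_set_on() arms the outstanding-notification bit, and
 * the POSTED_INTR_VECTOR IPI makes the CPU sync PIR into vIRR without
 * a VM-Exit.  If the target is not IN_GUEST_MODE, the IPI is skipped
 * and kvm_vcpu_kick() forces the sync on the next VM-Enter instead.
 */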
/*
 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
 * will not change in the lifetime of the guest.
 * Note that host-state that does change is set elsewhere. E.g., host-state
 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
 */
void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
{
	u32 low32, high32;
	unsigned long tmpl;
	unsigned long cr0, cr3, cr4;

	cr0 = read_cr0();
	WARN_ON(cr0 & X86_CR0_TS);
	vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */

	/*
	 * Save the most likely value for this task's CR3 in the VMCS.
	 * We can't use __get_current_cr3_fast() because we're not atomic.
	 */
	cr3 = __read_cr3();
	vmcs_writel(HOST_CR3, cr3);		/* 22.2.3  FIXME: shadow tables */
	vmx->loaded_vmcs->host_state.cr3 = cr3;

	/* Save the most likely value for this task's CR4 in the VMCS. */
	cr4 = cr4_read_shadow();
	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
	vmx->loaded_vmcs->host_state.cr4 = cr4;

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
	/*
	 * Load null selectors, so we can avoid reloading them in
	 * vmx_prepare_switch_to_host(), in case userspace uses
	 * the null selectors too (the expected case).
	 */
	vmcs_write16(HOST_DS_SELECTOR, 0);
	vmcs_write16(HOST_ES_SELECTOR, 0);
#else
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#endif
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

	vmcs_writel(HOST_IDTR_BASE, host_idt_base);   /* 22.2.4 */

	vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */

	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */

	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
		rdmsr(MSR_IA32_CR_PAT, low32, high32);
		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
	}

	if (cpu_has_load_ia32_efer())
		vmcs_write64(HOST_IA32_EFER, host_efer);
}
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
	struct kvm_vcpu *vcpu = &vmx->vcpu;

	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
					  ~vcpu->arch.cr4_guest_rsvd_bits;
	if (!enable_ept)
		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
	if (is_guest_mode(&vmx->vcpu))
		vcpu->arch.cr4_guest_owned_bits &=
			~get_vmcs12(vcpu)->cr4_guest_host_mask;
	vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
}
static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
{
	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;

	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;

	if (!enable_vnmi)
		pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;

	if (!enable_preemption_timer)
		pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;

	return pin_based_exec_ctrl;
}

static u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;

	if (vmx_pt_mode_is_system())
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;

	if (vmx_pt_mode_is_system())
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}
static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
	if (cpu_has_secondary_exec_ctrls()) {
		if (kvm_vcpu_apicv_active(vcpu))
			secondary_exec_controls_setbit(vmx,
				      SECONDARY_EXEC_APIC_REGISTER_VIRT |
				      SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
		else
			secondary_exec_controls_clearbit(vmx,
					SECONDARY_EXEC_APIC_REGISTER_VIRT |
					SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
	}

	vmx_update_msr_bitmap_x2apic(vcpu);
}
static u32 vmx_exec_control(struct vcpu_vmx *vmx)
{
	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;

	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
		exec_control &= ~CPU_BASED_MOV_DR_EXITING;

	if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
		exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_STORE_EXITING |
				CPU_BASED_CR8_LOAD_EXITING;
#endif
	}
	if (!enable_ept)
		exec_control |= CPU_BASED_CR3_STORE_EXITING |
				CPU_BASED_CR3_LOAD_EXITING  |
				CPU_BASED_INVLPG_EXITING;
	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
		exec_control &= ~(CPU_BASED_MWAIT_EXITING |
				CPU_BASED_MONITOR_EXITING);
	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
		exec_control &= ~CPU_BASED_HLT_EXITING;
	return exec_control;
}
/*
 * Adjust a single secondary execution control bit to intercept/allow an
 * instruction in the guest.  This is usually done based on whether or not a
 * feature has been exposed to the guest in order to correctly emulate faults.
 */
static inline void
vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
				  u32 control, bool enabled, bool exiting)
{
	/*
	 * If the control is for an opt-in feature, clear the control if the
	 * feature is not exposed to the guest, i.e. not enabled.  If the
	 * control is opt-out, i.e. an exiting control, clear the control if
	 * the feature _is_ exposed to the guest, i.e. exiting/interception is
	 * disabled for the associated instruction.  Note, the caller is
	 * responsible for presetting exec_control to set all supported bits.
	 */
	if (enabled == exiting)
		*exec_control &= ~control;

	/*
	 * Update the nested MSR settings so that a nested VMM can/can't set
	 * controls for features that are/aren't exposed to the guest.
	 */
	if (nested) {
		if (enabled)
			vmx->nested.msrs.secondary_ctls_high |= control;
		else
			vmx->nested.msrs.secondary_ctls_high &= ~control;
	}
}

/*
 * Wrapper macro for the common case of adjusting a secondary execution control
 * based on a single guest CPUID bit, with a dedicated feature bit.  This also
 * verifies that the control is actually supported by KVM and hardware.
 */
#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
({									 \
	bool __enabled;							 \
									 \
	if (cpu_has_vmx_##name()) {					 \
		__enabled = guest_cpuid_has(&(vmx)->vcpu,		 \
					    X86_FEATURE_##feat_name);	 \
		vmx_adjust_secondary_exec_control(vmx, exec_control,	 \
			SECONDARY_EXEC_##ctrl_name, __enabled, exiting); \
	}								 \
})

/* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
#define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)

#define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
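/*
 * Illustrative expansion (a sketch, not compiled code): with the wrappers
 * above,
 *
 *	vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
 *
 * becomes roughly
 *
 *	if (cpu_has_vmx_invpcid()) {
 *		bool __enabled = guest_cpuid_has(&vmx->vcpu,
 *						 X86_FEATURE_INVPCID);
 *		vmx_adjust_secondary_exec_control(vmx, &exec_control,
 *			SECONDARY_EXEC_ENABLE_INVPCID, __enabled, false);
 *	}
 *
 * and the (enabled == exiting) test reduces to this truth table:
 *   opt-in  control (exiting == false): kept only if the feature is enabled;
 *   opt-out control (exiting == true):  kept only if the feature is disabled.
 */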
static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
{
	struct kvm_vcpu *vcpu = &vmx->vcpu;

	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;

	if (vmx_pt_mode_is_system())
		exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
	if (!cpu_need_virtualize_apic_accesses(vcpu))
		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	if (vmx->vpid == 0)
		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
	if (!enable_ept) {
		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
		enable_unrestricted_guest = 0;
	}
	if (!enable_unrestricted_guest)
		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
	if (kvm_pause_in_guest(vmx->vcpu.kvm))
		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
	if (!kvm_vcpu_apicv_active(vcpu))
		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;

	/* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
	 * in vmx_set_cr4.  */
	exec_control &= ~SECONDARY_EXEC_DESC;

	/* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
	   (handle_vmptrld).
	   We can NOT enable shadow_vmcs here because we don't have yet
	   a current VMCS12
	*/
	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;

	/*
	 * PML is enabled/disabled when dirty logging of memslots changes, but
	 * it needs to be set here when dirty logging is already active, e.g.
	 * if this vCPU was created after dirty logging was enabled.
	 */
	if (!vcpu->kvm->arch.cpu_dirty_logging_count)
		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;

	if (cpu_has_vmx_xsaves()) {
		/* Exposing XSAVES only when XSAVE is exposed */
		bool xsaves_enabled =
			boot_cpu_has(X86_FEATURE_XSAVE) &&
			guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
			guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);

		vcpu->arch.xsaves_enabled = xsaves_enabled;

		vmx_adjust_secondary_exec_control(vmx, &exec_control,
						  SECONDARY_EXEC_XSAVES,
						  xsaves_enabled, false);
	}

	/*
	 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
	 * feature is exposed to the guest.  This creates a virtualization hole
	 * if both are supported in hardware but only one is exposed to the
	 * guest, but letting the guest execute RDTSCP or RDPID when either one
	 * is advertised is preferable to emulating the advertised instruction
	 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
	 */
	if (cpu_has_vmx_rdtscp()) {
		bool rdpid_or_rdtscp_enabled =
			guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
			guest_cpuid_has(vcpu, X86_FEATURE_RDPID);

		vmx_adjust_secondary_exec_control(vmx, &exec_control,
						  SECONDARY_EXEC_ENABLE_RDTSCP,
						  rdpid_or_rdtscp_enabled, false);
	}
	vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);

	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);

	vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
				    ENABLE_USR_WAIT_PAUSE, false);

	if (!vcpu->kvm->arch.bus_lock_detection_enabled)
		exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION;

	return exec_control;
}
#define VMX_XSS_EXIT_BITMAP 0

/*
 * Noting that the initialization of Guest-state Area of VMCS is in
 * vmx_vcpu_reset().
 */
static void init_vmcs(struct vcpu_vmx *vmx)
{
	if (nested)
		nested_vmx_set_vmcs_shadowing_bitmap();

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));

	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

	/* Control */
	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));

	exec_controls_set(vmx, vmx_exec_control(vmx));

	if (cpu_has_secondary_exec_ctrls())
		secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));

	if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
		vmcs_write64(EOI_EXIT_BITMAP0, 0);
		vmcs_write64(EOI_EXIT_BITMAP1, 0);
		vmcs_write64(EOI_EXIT_BITMAP2, 0);
		vmcs_write64(EOI_EXIT_BITMAP3, 0);

		vmcs_write16(GUEST_INTR_STATUS, 0);

		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
	}

	if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
		vmcs_write32(PLE_GAP, ple_gap);
		vmx->ple_window = ple_window;
		vmx->ple_window_dirty = true;
	}

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
	vmx_set_constant_host_state(vmx);
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */

	if (cpu_has_vmx_vmfunc())
		vmcs_write64(VM_FUNCTION_CONTROL, 0);

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));

	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);

	vm_exit_controls_set(vmx, vmx_vmexit_ctrl());

	/* 22.2.1, 20.8.1 */
	vm_entry_controls_set(vmx, vmx_vmentry_ctrl());

	vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);

	set_cr4_guest_host_mask(vmx);

	if (vmx->vpid != 0)
		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);

	if (cpu_has_vmx_xsaves())
		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);

	if (enable_pml) {
		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
	}

	vmx_write_encls_bitmap(&vmx->vcpu, NULL);

	if (vmx_pt_mode_is_host_guest()) {
		memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
		/* Bit[6~0] are forced to 1, writes are ignored. */
		vmx->pt_desc.guest.output_mask = 0x7F;
		vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
	}

	vmcs_write32(GUEST_SYSENTER_CS, 0);
	vmcs_writel(GUEST_SYSENTER_ESP, 0);
	vmcs_writel(GUEST_SYSENTER_EIP, 0);
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	if (cpu_has_vmx_tpr_shadow()) {
		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
		if (cpu_need_tpr_shadow(&vmx->vcpu))
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
				     __pa(vmx->vcpu.arch.apic->regs));
		vmcs_write32(TPR_THRESHOLD, 0);
	}

	vmx_setup_uret_msrs(vmx);
}
static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->rmode.vm86_active = 0;
	vmx->spec_ctrl = 0;

	vmx->msr_ia32_umwait_control = 0;

	vmx->hv_deadline_tsc = -1;
	kvm_set_cr8(vcpu, 0);

	vmx_segment_cache_clear(vmx);

	seg_setup(VCPU_SREG_CS);
	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
	vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);

	seg_setup(VCPU_SREG_DS);
	seg_setup(VCPU_SREG_ES);
	seg_setup(VCPU_SREG_FS);
	seg_setup(VCPU_SREG_GS);
	seg_setup(VCPU_SREG_SS);

	vmcs_write16(GUEST_TR_SELECTOR, 0);
	vmcs_writel(GUEST_TR_BASE, 0);
	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
	vmcs_writel(GUEST_LDTR_BASE, 0);
	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

	vmcs_writel(GUEST_GDTR_BASE, 0);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

	vmcs_writel(GUEST_IDTR_BASE, 0);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
	if (kvm_mpx_supported())
		vmcs_write64(GUEST_BNDCFGS, 0);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

	vpid_sync_context(vmx->vpid);
}
static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
{
	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
}

static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	if (!enable_vnmi ||
	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
		vmx_enable_irq_window(vcpu);
		return;
	}

	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
}
static void vmx_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	uint32_t intr;
	int irq = vcpu->arch.interrupt.nr;

	trace_kvm_inj_virq(irq);

	++vcpu->stat.irq_injections;
	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (vcpu->arch.interrupt.soft)
			inc_eip = vcpu->arch.event_exit_inst_len;
		kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
		return;
	}
	intr = irq | INTR_INFO_VALID_MASK;
	if (vcpu->arch.interrupt.soft) {
		intr |= INTR_TYPE_SOFT_INTR;
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
	} else
		intr |= INTR_TYPE_EXT_INTR;
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);

	vmx_clear_hlt(vcpu);
}
static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!enable_vnmi) {
		/*
		 * Tracking the NMI-blocked state in software is built upon
		 * finding the next open IRQ window. This, in turn, depends on
		 * well-behaving guests: They have to keep IRQs disabled at
		 * least as long as the NMI handler runs. Otherwise we may
		 * cause NMI nesting, maybe breaking the guest. But as this is
		 * highly unlikely, we can live with the residual risk.
		 */
		vmx->loaded_vmcs->soft_vnmi_blocked = 1;
		vmx->loaded_vmcs->vnmi_blocked_time = 0;
	}

	++vcpu->stat.nmi_injections;
	vmx->loaded_vmcs->nmi_known_unmasked = false;

	if (vmx->rmode.vm86_active) {
		kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
		return;
	}

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);

	vmx_clear_hlt(vcpu);
}
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool masked;

	if (!enable_vnmi)
		return vmx->loaded_vmcs->soft_vnmi_blocked;
	if (vmx->loaded_vmcs->nmi_known_unmasked)
		return false;
	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
	return masked;
}
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!enable_vnmi) {
		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
			vmx->loaded_vmcs->vnmi_blocked_time = 0;
		}
	} else {
		vmx->loaded_vmcs->nmi_known_unmasked = !masked;
		if (masked)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
		else
			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
					GUEST_INTR_STATE_NMI);
	}
}
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
		return false;

	if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
		return true;

	return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
		 GUEST_INTR_STATE_NMI));
}
static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return -EBUSY;

	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
		return -EBUSY;

	return !vmx_nmi_blocked(vcpu);
}
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
		return false;

	return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
	       (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return -EBUSY;

	/*
	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
	 * e.g. if the IRQ arrived asynchronously after checking nested events.
	 */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
		return -EBUSY;

	return !vmx_interrupt_blocked(vcpu);
}
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	void __user *ret;

	if (enable_unrestricted_guest)
		return 0;

	mutex_lock(&kvm->slots_lock);
	ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
				      PAGE_SIZE * 3);
	mutex_unlock(&kvm->slots_lock);

	if (IS_ERR(ret))
		return PTR_ERR(ret);

	to_kvm_vmx(kvm)->tss_addr = addr;

	return init_rmode_tss(kvm, ret);
}
static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
{
	to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
	return 0;
}
static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
{
	switch (vec) {
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject the exception
		 * from user space while in guest debugging mode.
		 */
		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return false;
		fallthrough;
	case DB_VECTOR:
		return !(vcpu->guest_debug &
			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
	case DE_VECTOR:
	case OF_VECTOR:
	case BR_VECTOR:
	case UD_VECTOR:
	case DF_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
	case MF_VECTOR:
		return true;
	}
	return false;
}
static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
{
	/*
	 * Instruction with address size override prefix opcode 0x67
	 * Cause the #SS fault with 0 error code in VM86 mode.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
		if (kvm_emulate_instruction(vcpu, 0)) {
			if (vcpu->arch.halt_request) {
				vcpu->arch.halt_request = 0;
				return kvm_vcpu_halt(vcpu);
			}
			return 1;
		}
		return 0;
	}

	/*
	 * Forward all other exceptions that are valid in real mode.
	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
	 *        the required debugging infrastructure rework.
	 */
	kvm_queue_exception(vcpu, vec);
	return 1;
}
static int handle_machine_check(struct kvm_vcpu *vcpu)
{
	/* handled by vmx_vcpu_run() */
	return 1;
}
/*
 * If the host has split lock detection disabled, then #AC is
 * unconditionally injected into the guest, which is the pre split lock
 * detection behaviour.
 *
 * If the host has split lock detection enabled then #AC is
 * only injected into the guest when:
 *  - Guest CPL == 3 (user mode)
 *  - Guest has #AC detection enabled in CR0
 *  - Guest EFLAGS has AC bit set
 */
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
{
	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		return true;

	return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
	       (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
}
static int handle_exception_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 intr_info, ex_no, error_code;
	unsigned long cr2, dr6;
	u32 vect_info;

	vect_info = vmx->idt_vectoring_info;
	intr_info = vmx_get_intr_info(vcpu);

	if (is_machine_check(intr_info) || is_nmi(intr_info))
		return 1; /* handled by handle_exception_nmi_irqoff() */

	if (is_invalid_opcode(intr_info))
		return handle_ud(vcpu);

	error_code = 0;
	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);

	if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
		WARN_ON_ONCE(!enable_vmware_backdoor);

		/*
		 * VMware backdoor emulation on #GP interception only handles
		 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero
		 * error code on #GP.
		 */
		if (error_code) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
			return 1;
		}
		return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
	}

	/*
	 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing
	 * MMIO, it is better to report an internal error.
	 * See the comments in vmx_handle_exit.
	 */
	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
	    !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
		vcpu->run->internal.ndata = 4;
		vcpu->run->internal.data[0] = vect_info;
		vcpu->run->internal.data[1] = intr_info;
		vcpu->run->internal.data[2] = error_code;
		vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
		return 0;
	}

	if (is_page_fault(intr_info)) {
		cr2 = vmx_get_exit_qual(vcpu);
		if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
			/*
			 * EPT will cause page fault only if we need to
			 * detect illegal GPAs.
			 */
			WARN_ON_ONCE(!allow_smaller_maxphyaddr);
			kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
			return 1;
		} else
			return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
	}

	ex_no = intr_info & INTR_INFO_VECTOR_MASK;

	if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
		return handle_rmode_exception(vcpu, ex_no, error_code);

	switch (ex_no) {
	case DB_VECTOR:
		dr6 = vmx_get_exit_qual(vcpu);
		if (!(vcpu->guest_debug &
		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
			if (is_icebp(intr_info))
				WARN_ON(!skip_emulated_instruction(vcpu));

			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
			return 1;
		}
		kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
		fallthrough;
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject #BP from
		 * user space while in guest debugging mode. Reading it for
		 * #DB as well causes no harm, it is not used in that case.
		 */
		vmx->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
		kvm_run->debug.arch.exception = ex_no;
		break;
	case AC_VECTOR:
		if (vmx_guest_inject_ac(vcpu)) {
			kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
			return 1;
		}

		/*
		 * Handle split lock. Depending on detection mode this will
		 * either warn and disable split lock detection for this
		 * task or force SIGBUS on it.
		 */
		if (handle_guest_split_lock(kvm_rip_read(vcpu)))
			return 1;
		fallthrough;
	default:
		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
		kvm_run->ex.exception = ex_no;
		kvm_run->ex.error_code = error_code;
		break;
	}

	return 0;
}
static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.irq_exits;
	return 1;
}
static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
	vcpu->mmio_needed = 0;
	return 0;
}
static int handle_io(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	int size, in, string;
	unsigned port;

	exit_qualification = vmx_get_exit_qual(vcpu);
	string = (exit_qualification & 16) != 0;

	++vcpu->stat.io_exits;

	if (string)
		return kvm_emulate_instruction(vcpu, 0);

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;
	in = (exit_qualification & 8) != 0;

	return kvm_fast_pio(vcpu, size, port, in);
}
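/*
 * Worked example of the I/O exit-qualification decode above: an
 * "in %dx, %al" from port 0x3f8 yields bits 2:0 = 0 (1-byte access, so
 * size = 1), bit 3 = 1 (IN), bit 4 = 0 (not a string instruction) and
 * bits 31:16 = 0x3f8 (port), i.e. a qualification of 0x03f80008.
 */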
static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xc1;
}
/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (is_guest_mode(vcpu)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		unsigned long orig_val = val;

		/*
		 * We get here when L2 changed cr0 in a way that did not change
		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
		 * but did change L0 shadowed bits. So we first calculate the
		 * effective cr0 value that L1 would like to write into the
		 * hardware. It consists of the L2-owned bits from the new
		 * value combined with the L1-owned bits from L1's guest_cr0.
		 */
		val = (val & ~vmcs12->cr0_guest_host_mask) |
			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);

		if (!nested_guest_cr0_valid(vcpu, val))
			return 1;

		if (kvm_set_cr0(vcpu, val))
			return 1;
		vmcs_writel(CR0_READ_SHADOW, orig_val);
		return 0;
	} else {
		if (to_vmx(vcpu)->nested.vmxon &&
		    !nested_host_cr0_valid(vcpu, val))
			return 1;

		return kvm_set_cr0(vcpu, val);
	}
}
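/*
 * Illustrative example of the combine above: if L1 shadows only CR0.TS
 * (cr0_guest_host_mask = X86_CR0_TS) and vmcs12->guest_cr0 has TS set, then
 * for an L2 write of val the effective value is
 * (val & ~X86_CR0_TS) | X86_CR0_TS, i.e. L2's bits with L1's TS forced in,
 * since L1 owns every bit set in the mask.
 */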
static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (is_guest_mode(vcpu)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		unsigned long orig_val = val;

		/* analogously to handle_set_cr0 */
		val = (val & ~vmcs12->cr4_guest_host_mask) |
			(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
		if (kvm_set_cr4(vcpu, val))
			return 1;
		vmcs_writel(CR4_READ_SHADOW, orig_val);
		return 0;
	} else
		return kvm_set_cr4(vcpu, val);
}
static int handle_desc(struct kvm_vcpu *vcpu)
{
	WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
	return kvm_emulate_instruction(vcpu, 0);
}
static int handle_cr(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification, val;
	int cr;
	int reg;
	int err;
	int ret;

	exit_qualification = vmx_get_exit_qual(vcpu);
	cr = exit_qualification & 15;
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		val = kvm_register_read(vcpu, reg);
		trace_kvm_cr_write(cr, val);
		switch (cr) {
		case 0:
			err = handle_set_cr0(vcpu, val);
			return kvm_complete_insn_gp(vcpu, err);
		case 3:
			WARN_ON_ONCE(enable_unrestricted_guest);
			err = kvm_set_cr3(vcpu, val);
			return kvm_complete_insn_gp(vcpu, err);
		case 4:
			err = handle_set_cr4(vcpu, val);
			return kvm_complete_insn_gp(vcpu, err);
		case 8: {
			u8 cr8_prev = kvm_get_cr8(vcpu);
			u8 cr8 = (u8)val;
			err = kvm_set_cr8(vcpu, cr8);
			ret = kvm_complete_insn_gp(vcpu, err);
			if (lapic_in_kernel(vcpu))
				return ret;
			if (cr8_prev <= cr8)
				return ret;
			/*
			 * TODO: we might be squashing a
			 * KVM_GUESTDBG_SINGLESTEP-triggered
			 * KVM_EXIT_DEBUG here.
			 */
			vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
			return 0;
		}
		}
		break;
	case 2: /* clts */
		KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS");
		return -EIO;
	case 1: /*mov from cr*/
		switch (cr) {
		case 3:
			WARN_ON_ONCE(enable_unrestricted_guest);
			val = kvm_read_cr3(vcpu);
			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			return kvm_skip_emulated_instruction(vcpu);
		case 8:
			val = kvm_get_cr8(vcpu);
			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			return kvm_skip_emulated_instruction(vcpu);
		}
		break;
	case 3: /* lmsw */
		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
		kvm_lmsw(vcpu, val);

		return kvm_skip_emulated_instruction(vcpu);
	default:
		break;
	}
	vcpu->run->exit_reason = 0;
	vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
		    (int)(exit_qualification >> 4) & 3, cr);
	return 0;
}
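/*
 * The CR-access exit qualification decoded above packs: bits 3:0 the CR
 * number, bits 5:4 the access type (0 = MOV to CR, 1 = MOV from CR,
 * 2 = CLTS, 3 = LMSW), bits 11:8 the GPR operand and, for LMSW, the source
 * data starting at LMSW_SOURCE_DATA_SHIFT (bit 16).
 */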
static int handle_dr(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	int dr, dr7, reg;
	int err = 1;

	exit_qualification = vmx_get_exit_qual(vcpu);
	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;

	/* First, if DR does not exist, trigger UD */
	if (!kvm_require_dr(vcpu, dr))
		return 1;

	if (kvm_x86_ops.get_cpl(vcpu) > 0)
		goto out;

	dr7 = vmcs_readl(GUEST_DR7);
	if (dr7 & DR7_GD) {
		/*
		 * As the vm-exit takes precedence over the debug trap, we
		 * need to emulate the latter, either for the host or the
		 * guest debugging itself.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
			vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
			vcpu->run->debug.arch.dr7 = dr7;
			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
			vcpu->run->debug.arch.exception = DB_VECTOR;
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			return 0;
		} else {
			kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
			return 1;
		}
	}

	if (vcpu->guest_debug == 0) {
		exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);

		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction.  The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
	if (exit_qualification & TYPE_MOV_FROM_DR) {
		unsigned long val;

		kvm_get_dr(vcpu, dr, &val);
		kvm_register_write(vcpu, reg, val);
		err = 0;
	} else {
		err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
	}

out:
	return kvm_complete_insn_gp(vcpu, err);
}
static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	get_debugreg(vcpu->arch.dr6, 6);
	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);

	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);

	/*
	 * exc_debug expects dr6 to be cleared after it runs, avoid that it
	 * sees a stale dr6 from the guest.
	 */
	set_debugreg(DR6_RESERVED, 6);
}
static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
	vmcs_writel(GUEST_DR7, val);
}
static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
{
	kvm_apic_update_ppr(vcpu);
	return 1;
}
static int handle_interrupt_window(struct kvm_vcpu *vcpu)
{
	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	++vcpu->stat.irq_window_exits;
	return 1;
}
static int handle_invlpg(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);

	kvm_mmu_invlpg(vcpu, exit_qualification);
	return kvm_skip_emulated_instruction(vcpu);
}
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
	if (likely(fasteoi)) {
		unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
		int access_type, offset;

		access_type = exit_qualification & APIC_ACCESS_TYPE;
		offset = exit_qualification & APIC_ACCESS_OFFSET;
		/*
		 * A sane guest uses MOV to write EOI, and the written value
		 * doesn't matter.  So take a short-circuit here and avoid
		 * heavy instruction emulation.
		 */
		if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
		    (offset == APIC_EOI)) {
			kvm_lapic_set_eoi(vcpu);
			return kvm_skip_emulated_instruction(vcpu);
		}
	}
	return kvm_emulate_instruction(vcpu, 0);
}
static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	int vector = exit_qualification & 0xff;

	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
	kvm_apic_set_eoi_accelerated(vcpu, vector);
	return 1;
}
static int handle_apic_write(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	u32 offset = exit_qualification & 0xfff;

	/* APIC-write VM exit is trap-like and thus no need to adjust IP */
	kvm_apic_write_nodecode(vcpu, offset);
	return 1;
}
static int handle_task_switch(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qualification;
	bool has_error_code = false;
	u32 error_code = 0;
	u16 tss_selector;
	int reason, type, idt_v, idt_index;

	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);

	exit_qualification = vmx_get_exit_qual(vcpu);

	reason = (u32)exit_qualification >> 30;
	if (reason == TASK_SWITCH_GATE && idt_v) {
		switch (type) {
		case INTR_TYPE_NMI_INTR:
			vcpu->arch.nmi_injected = false;
			vmx_set_nmi_mask(vcpu, true);
			break;
		case INTR_TYPE_EXT_INTR:
		case INTR_TYPE_SOFT_INTR:
			kvm_clear_interrupt_queue(vcpu);
			break;
		case INTR_TYPE_HARD_EXCEPTION:
			if (vmx->idt_vectoring_info &
			    VECTORING_INFO_DELIVER_CODE_MASK) {
				has_error_code = true;
				error_code =
					vmcs_read32(IDT_VECTORING_ERROR_CODE);
			}
			fallthrough;
		case INTR_TYPE_SOFT_EXCEPTION:
			kvm_clear_exception_queue(vcpu);
			break;
		default:
			break;
		}
	}
	tss_selector = exit_qualification;

	if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
		       type != INTR_TYPE_EXT_INTR &&
		       type != INTR_TYPE_NMI_INTR))
		WARN_ON(!skip_emulated_instruction(vcpu));

	/*
	 * TODO: What about debug traps on tss switch?
	 *       Are we supposed to inject them and update dr6?
	 */
	return kvm_task_switch(vcpu, tss_selector,
			       type == INTR_TYPE_SOFT_INTR ? idt_index : -1,
			       reason, has_error_code, error_code);
}
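/*
 * Per the decode above, the task-switch exit qualification carries the new
 * TSS selector in bits 15:0 and the switch source in bits 31:30 (0 = CALL,
 * 1 = IRET, 2 = JMP, 3 = task gate in the IDT, i.e. TASK_SWITCH_GATE).
 */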
static int handle_ept_violation(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	gpa_t gpa;
	u64 error_code;

	exit_qualification = vmx_get_exit_qual(vcpu);

	/*
	 * EPT violation happened while executing iret from NMI,
	 * "blocked by NMI" bit has to be set before next VM entry.
	 * There are errata that may cause this bit to not be set:
	 * AAK134, BY25.
	 */
	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    enable_vnmi &&
	    (exit_qualification & INTR_INFO_UNBLOCK_NMI))
		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);

	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
	trace_kvm_page_fault(gpa, exit_qualification);

	/* Is it a read fault? */
	error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
		     ? PFERR_USER_MASK : 0;
	/* Is it a write fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
		      ? PFERR_WRITE_MASK : 0;
	/* Is it a fetch fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
		      ? PFERR_FETCH_MASK : 0;
	/* ept page table entry is present? */
	error_code |= (exit_qualification &
		       (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE |
			EPT_VIOLATION_EXECUTABLE))
		      ? PFERR_PRESENT_MASK : 0;

	error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
	       PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

	vcpu->arch.exit_qualification = exit_qualification;

	/*
	 * Check that the GPA doesn't exceed physical memory limits, as that is
	 * a guest page fault.  We have to emulate the instruction here, because
	 * if the illegal address is that of a paging structure, then
	 * EPT_VIOLATION_ACC_WRITE bit is set.  Alternatively, if supported we
	 * would also use advanced VM-exit information for EPT violations to
	 * reconstruct the page fault error code.
	 */
	if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
		return kvm_emulate_instruction(vcpu, 0);

	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}
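/*
 * Worked example of the error-code mapping above: a guest write that hits a
 * present but read-only final translation has ACC_WRITE, the READABLE
 * permission bit and GVA_TRANSLATED set in the exit qualification, and so
 * yields PFERR_WRITE_MASK | PFERR_PRESENT_MASK | PFERR_GUEST_FINAL_MASK.
 */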
static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
	gpa_t gpa;

	if (!vmx_can_emulate_instruction(vcpu, NULL, 0))
		return 1;

	/*
	 * A nested guest cannot optimize MMIO vmexits, because we have an
	 * nGPA here instead of the required GPA.
	 */
	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
	if (!is_guest_mode(vcpu) &&
	    !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
		trace_kvm_fast_mmio(gpa);
		return kvm_skip_emulated_instruction(vcpu);
	}

	return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
}
static int handle_nmi_window(struct kvm_vcpu *vcpu)
{
	if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm))
		return -EIO;

	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
	++vcpu->stat.nmi_window_exits;
	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 1;
}
static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool intr_window_requested;
	unsigned count = 130;

	intr_window_requested = exec_controls_get(vmx) &
				CPU_BASED_INTR_WINDOW_EXITING;

	while (vmx->emulation_required && count-- != 0) {
		if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
			return handle_interrupt_window(&vmx->vcpu);

		if (kvm_test_request(KVM_REQ_EVENT, vcpu))
			return 1;

		if (!kvm_emulate_instruction(vcpu, 0))
			return 0;

		if (vmx->emulation_required && !vmx->rmode.vm86_active &&
		    vcpu->arch.exception.pending) {
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			vcpu->run->internal.suberror =
						KVM_INTERNAL_ERROR_EMULATION;
			vcpu->run->internal.ndata = 0;
			return 0;
		}

		if (vcpu->arch.halt_request) {
			vcpu->arch.halt_request = 0;
			return kvm_vcpu_halt(vcpu);
		}

		/*
		 * Note, return 1 and not 0, vcpu_run() will invoke
		 * xfer_to_guest_mode() which will create a proper return
		 * code.
		 */
		if (__xfer_to_guest_mode_work_pending())
			return 1;
	}

	return 1;
}
static void grow_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned int old = vmx->ple_window;

	vmx->ple_window = __grow_ple_window(old, ple_window,
					    ple_window_grow,
					    ple_window_max);

	if (vmx->ple_window != old) {
		vmx->ple_window_dirty = true;
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    vmx->ple_window, old);
	}
}
static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned int old = vmx->ple_window;

	vmx->ple_window = __shrink_ple_window(old, ple_window,
					      ple_window_shrink,
					      ple_window);

	if (vmx->ple_window != old) {
		vmx->ple_window_dirty = true;
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    vmx->ple_window, old);
	}
}
/*
 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
 * exiting, so only get here on cpu with PAUSE-Loop-Exiting.
 */
static int handle_pause(struct kvm_vcpu *vcpu)
{
	if (!kvm_pause_in_guest(vcpu->kvm))
		grow_ple_window(vcpu);

	/*
	 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
	 * VM-execution control is ignored if CPL > 0. OTOH, KVM
	 * never sets PAUSE_EXITING and just sets PLE if supported,
	 * so the vcpu must be CPL=0 if it gets a PAUSE exit.
	 */
	kvm_vcpu_on_spin(vcpu, true);
	return kvm_skip_emulated_instruction(vcpu);
}

static int handle_monitor_trap(struct kvm_vcpu *vcpu)
{
	return 1;
}
static int handle_invpcid(struct kvm_vcpu *vcpu)
{
	u32 vmx_instruction_info;
	unsigned long type;
	gva_t gva;
	struct {
		u64 pcid;
		u64 gla;
	} operand;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);

	if (type > 3) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	/* According to the Intel instruction reference, the memory operand
	 * is read even if it isn't needed (e.g., for type==all)
	 */
	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
				vmx_instruction_info, false,
				sizeof(operand), &gva))
		return 1;

	return kvm_handle_invpcid(vcpu, type, gva);
}
static int handle_pml_full(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;

	trace_kvm_pml_full(vcpu->vcpu_id);

	exit_qualification = vmx_get_exit_qual(vcpu);

	/*
	 * PML buffer FULL happened while executing iret from NMI,
	 * "blocked by NMI" bit has to be set before next VM entry.
	 */
	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    enable_vnmi &&
	    (exit_qualification & INTR_INFO_UNBLOCK_NMI))
		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
			      GUEST_INTR_STATE_NMI);

	/*
	 * PML buffer already flushed at beginning of VMEXIT. Nothing to do
	 * here.., and there's no userspace involvement needed for PML.
	 */
	return 1;
}
static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->req_immediate_exit &&
	    !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
		kvm_lapic_expired_hv_timer(vcpu);
		return EXIT_FASTPATH_REENTER_GUEST;
	}

	return EXIT_FASTPATH_NONE;
}

static int handle_preemption_timer(struct kvm_vcpu *vcpu)
{
	handle_fastpath_preemption_timer(vcpu);
	return 1;
}
/*
 * When nested=0, all VMX instruction VM Exits filter here.  The handlers
 * are overwritten by nested_vmx_setup() when nested=1.
 */
static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}

#ifndef CONFIG_X86_SGX_KVM
static int handle_encls(struct kvm_vcpu *vcpu)
{
	/*
	 * SGX virtualization is disabled.  There is no software enable bit for
	 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent
	 * the guest from executing ENCLS (when SGX is supported by hardware).
	 */
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
#endif /* CONFIG_X86_SGX_KVM */
static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
{
	/*
	 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
	 * VM-Exits. Unconditionally set the flag here and leave the handling
	 * to vmx_handle_exit().
	 */
	to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
	return 1;
}
/*
 * The exit handlers return 1 if the exit was handled fully and guest execution
 * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
 * to be done to userspace and return 0.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception_nmi,
	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
	[EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
	[EXIT_REASON_CR_ACCESS]               = handle_cr,
	[EXIT_REASON_DR_ACCESS]               = handle_dr,
	[EXIT_REASON_CPUID]                   = kvm_emulate_cpuid,
	[EXIT_REASON_MSR_READ]                = kvm_emulate_rdmsr,
	[EXIT_REASON_MSR_WRITE]               = kvm_emulate_wrmsr,
	[EXIT_REASON_INTERRUPT_WINDOW]        = handle_interrupt_window,
	[EXIT_REASON_HLT]                     = kvm_emulate_halt,
	[EXIT_REASON_INVD]                    = kvm_emulate_invd,
	[EXIT_REASON_INVLPG]                  = handle_invlpg,
	[EXIT_REASON_RDPMC]                   = kvm_emulate_rdpmc,
	[EXIT_REASON_VMCALL]                  = kvm_emulate_hypercall,
	[EXIT_REASON_VMCLEAR]                 = handle_vmx_instruction,
	[EXIT_REASON_VMLAUNCH]                = handle_vmx_instruction,
	[EXIT_REASON_VMPTRLD]                 = handle_vmx_instruction,
	[EXIT_REASON_VMPTRST]                 = handle_vmx_instruction,
	[EXIT_REASON_VMREAD]                  = handle_vmx_instruction,
	[EXIT_REASON_VMRESUME]                = handle_vmx_instruction,
	[EXIT_REASON_VMWRITE]                 = handle_vmx_instruction,
	[EXIT_REASON_VMOFF]                   = handle_vmx_instruction,
	[EXIT_REASON_VMON]                    = handle_vmx_instruction,
	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
	[EXIT_REASON_APIC_WRITE]              = handle_apic_write,
	[EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
	[EXIT_REASON_WBINVD]                  = kvm_emulate_wbinvd,
	[EXIT_REASON_XSETBV]                  = kvm_emulate_xsetbv,
	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
	[EXIT_REASON_GDTR_IDTR]               = handle_desc,
	[EXIT_REASON_LDTR_TR]                 = handle_desc,
	[EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
	[EXIT_REASON_MWAIT_INSTRUCTION]       = kvm_emulate_mwait,
	[EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
	[EXIT_REASON_MONITOR_INSTRUCTION]     = kvm_emulate_monitor,
	[EXIT_REASON_INVEPT]                  = handle_vmx_instruction,
	[EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
	[EXIT_REASON_RDRAND]                  = kvm_handle_invalid_op,
	[EXIT_REASON_RDSEED]                  = kvm_handle_invalid_op,
	[EXIT_REASON_PML_FULL]                = handle_pml_full,
	[EXIT_REASON_INVPCID]                 = handle_invpcid,
	[EXIT_REASON_VMFUNC]                  = handle_vmx_instruction,
	[EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
	[EXIT_REASON_ENCLS]                   = handle_encls,
	[EXIT_REASON_BUS_LOCK]                = handle_bus_lock_vmexit,
};

static const int kvm_vmx_max_exit_handlers =
	ARRAY_SIZE(kvm_vmx_exit_handlers);
static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
			      u32 *intr_info, u32 *error_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	*info1 = vmx_get_exit_qual(vcpu);
	if (!(vmx->exit_reason.failed_vmentry)) {
		*info2 = vmx->idt_vectoring_info;
		*intr_info = vmx_get_intr_info(vcpu);
		if (is_exception_with_error_code(*intr_info))
			*error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
		else
			*error_code = 0;
	} else {
		*info2 = 0;
		*intr_info = 0;
		*error_code = 0;
	}
}
static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
	if (vmx->pml_pg) {
		__free_page(vmx->pml_pg);
		vmx->pml_pg = NULL;
	}
}
static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 *pml_buf;
	u16 pml_idx;

	pml_idx = vmcs_read16(GUEST_PML_INDEX);

	/* Do nothing if PML buffer is empty */
	if (pml_idx == (PML_ENTITY_NUM - 1))
		return;

	/* PML index always points to next available PML buffer entity */
	if (pml_idx >= PML_ENTITY_NUM)
		pml_idx = 0;
	else
		pml_idx++;

	pml_buf = page_address(vmx->pml_pg);
	for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
		u64 gpa;

		gpa = pml_buf[pml_idx];
		WARN_ON(gpa & (PAGE_SIZE - 1));
		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	}

	/* reset PML index */
	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
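/*
 * PML index semantics used above: hardware logs a GPA at the current
 * GUEST_PML_INDEX and then decrements it, so a freshly reset index of
 * PML_ENTITY_NUM - 1 (511) means the buffer is empty and the index always
 * points at the next free slot.  The oldest valid entry is therefore at
 * index + 1, or 0 when the index wrapped below zero (read back as
 * >= PML_ENTITY_NUM) because the buffer filled up.
 */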
static void vmx_dump_sel(char *name, uint32_t sel)
{
	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
	       name, vmcs_read16(sel),
	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
}

static void vmx_dump_dtsel(char *name, uint32_t limit)
{
	pr_err("%s limit=0x%08x, base=0x%016lx\n",
	       name, vmcs_read32(limit),
	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
}

static void vmx_dump_msrs(char *name, struct vmx_msrs *m)
{
	int i;
	struct vmx_msr_entry *e;

	pr_err("MSR %s:\n", name);
	for (i = 0, e = m->val; i < m->nr; ++i, ++e)
		pr_err("  %2d: msr=0x%08x value=0x%016llx\n",
		       i, e->index, e->value);
}
void dump_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmentry_ctl, vmexit_ctl;
	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
	unsigned long cr4;
	int efer_slot;

	if (!dump_invalid_vmcs) {
		pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
		return;
	}

	vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
	vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
	cr4 = vmcs_readl(GUEST_CR4);
	secondary_exec_control = 0;
	if (cpu_has_secondary_exec_ctrls())
		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);

	pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
	       vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
	pr_err("*** Guest State ***\n");
	pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
	       vmcs_readl(CR0_GUEST_HOST_MASK));
	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
	if (cpu_has_vmx_ept()) {
		pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
		pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
	}
	pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
	       vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
	pr_err("RFLAGS=0x%08lx         DR7 = 0x%016lx\n",
	       vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
	       vmcs_readl(GUEST_SYSENTER_ESP),
	       vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
	vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
	vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
	vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
	vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
	vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
	vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
	vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
	vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
	efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
		pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
	else if (efer_slot >= 0)
		pr_err("EFER= 0x%016llx (autoload)\n",
		       vmx->msr_autoload.guest.val[efer_slot].value);
	else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
		pr_err("EFER= 0x%016llx (effective)\n",
		       vcpu->arch.efer | (EFER_LMA | EFER_LME));
	else
		pr_err("EFER= 0x%016llx (effective)\n",
		       vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
		pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
	       vmcs_read64(GUEST_IA32_DEBUGCTL),
	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
	if (cpu_has_load_perf_global_ctrl() &&
	    vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
		pr_err("PerfGlobCtl = 0x%016llx\n",
		       vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
	if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
		pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
	pr_err("Interruptibility = %08x  ActivityState = %08x\n",
	       vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
	       vmcs_read32(GUEST_ACTIVITY_STATE));
	if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
		pr_err("InterruptStatus = %04x\n",
		       vmcs_read16(GUEST_INTR_STATUS));
	if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0)
		vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
	if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0)
		vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);

	pr_err("*** Host State ***\n");
	pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
	       vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
	pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
	       vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
	       vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
	       vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
	       vmcs_read16(HOST_TR_SELECTOR));
	pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
	       vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
	       vmcs_readl(HOST_TR_BASE));
	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
	       vmcs_readl(HOST_CR4));
	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
	       vmcs_readl(HOST_IA32_SYSENTER_ESP),
	       vmcs_read32(HOST_IA32_SYSENTER_CS),
	       vmcs_readl(HOST_IA32_SYSENTER_EIP));
	if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
		pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
	if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
		pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
	if (cpu_has_load_perf_global_ctrl() &&
	    vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		pr_err("PerfGlobCtl = 0x%016llx\n",
		       vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
	if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0)
		vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);

	pr_err("*** Control State ***\n");
	pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
	       pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
	pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
	       vmcs_read32(EXCEPTION_BITMAP),
	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
	       vmcs_read32(VM_EXIT_INTR_INFO),
	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
	pr_err("        reason=%08x qualification=%016lx\n",
	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
		pr_err("TSC Multiplier = 0x%016llx\n",
		       vmcs_read64(TSC_MULTIPLIER));
	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
		if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
			u16 status = vmcs_read16(GUEST_INTR_STATUS);
			pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
		}
		pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
		if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
			pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
		pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
	}
	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
		pr_err("PLE Gap=%08x Window=%08x\n",
		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
		pr_err("Virtual processor ID = 0x%04x\n",
		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
}
5888 * The guest has exited. See if we can fix it or if we need userspace
5891 static int __vmx_handle_exit(struct kvm_vcpu
*vcpu
, fastpath_t exit_fastpath
)
5893 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
5894 union vmx_exit_reason exit_reason
= vmx
->exit_reason
;
5895 u32 vectoring_info
= vmx
->idt_vectoring_info
;
5896 u16 exit_handler_index
;
5899 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
5900 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
5901 * querying dirty_bitmap, we only need to kick all vcpus out of guest
5902 * mode as if vcpus is in root mode, the PML buffer must has been
5903 * flushed already. Note, PML is never enabled in hardware while
5906 if (enable_pml
&& !is_guest_mode(vcpu
))
5907 vmx_flush_pml_buffer(vcpu
);
5910 * We should never reach this point with a pending nested VM-Enter, and
5911 * more specifically emulation of L2 due to invalid guest state (see
5912 * below) should never happen as that means we incorrectly allowed a
5913 * nested VM-Enter with an invalid vmcs12.
5915 if (KVM_BUG_ON(vmx
->nested
.nested_run_pending
, vcpu
->kvm
))
5918 /* If guest state is invalid, start emulating */
5919 if (vmx
->emulation_required
)
5920 return handle_invalid_guest_state(vcpu
);
5922 if (is_guest_mode(vcpu
)) {
5924 * PML is never enabled when running L2, bail immediately if a
5925 * PML full exit occurs as something is horribly wrong.
5927 if (exit_reason
.basic
== EXIT_REASON_PML_FULL
)
5928 goto unexpected_vmexit
;
5931 * The host physical addresses of some pages of guest memory
5932 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
5933 * Page). The CPU may write to these pages via their host
5934 * physical address while L2 is running, bypassing any
5935 * address-translation-based dirty tracking (e.g. EPT write
5938 * Mark them dirty on every exit from L2 to prevent them from
		 * getting out of sync with dirty tracking.
		 */
		nested_mark_vmcs12_pages_dirty(vcpu);

		if (nested_vmx_reflect_vmexit(vcpu))
			return 1;
	}

	if (exit_reason.failed_vmentry) {
		dump_vmcs(vcpu);
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= exit_reason.full;
		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
		return 0;
	}

	if (unlikely(vmx->fail)) {
		dump_vmcs(vcpu);
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= vmcs_read32(VM_INSTRUCTION_ERROR);
		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
		return 0;
	}

	/*
	 * Note:
	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it is caused by a
	 * delivery event, since that indicates the guest is accessing MMIO.
	 * The VM-exit would be triggered again after returning to the guest,
	 * causing an infinite loop.
	 */
	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
	     exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
	     exit_reason.basic != EXIT_REASON_PML_FULL &&
	     exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
	     exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
		int ndata = 3;

		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
		vcpu->run->internal.data[0] = vectoring_info;
		vcpu->run->internal.data[1] = exit_reason.full;
		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
			vcpu->run->internal.data[ndata++] =
				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
		}
		vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
		vcpu->run->internal.ndata = ndata;
		return 0;
	}

	if (unlikely(!enable_vnmi &&
		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
		if (!vmx_interrupt_blocked(vcpu)) {
			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
		} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
			   vcpu->arch.nmi_pending) {
			/*
			 * This CPU doesn't support us in finding the end of an
			 * NMI-blocked window if the guest runs with IRQs
			 * disabled. So we pull the trigger after 1 s of
			 * futile waiting, but inform the user about this.
			 */
			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
			       "state on VCPU %d after 1 s timeout\n",
			       __func__, vcpu->vcpu_id);
			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
		}
	}

	if (exit_fastpath != EXIT_FASTPATH_NONE)
		return 1;

	if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
		goto unexpected_vmexit;
#ifdef CONFIG_RETPOLINE
	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
		return kvm_emulate_wrmsr(vcpu);
	else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
		return handle_preemption_timer(vcpu);
	else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
		return handle_interrupt_window(vcpu);
	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
		return handle_external_interrupt(vcpu);
	else if (exit_reason.basic == EXIT_REASON_HLT)
		return kvm_emulate_halt(vcpu);
	else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
		return handle_ept_misconfig(vcpu);
#endif

	exit_handler_index = array_index_nospec((u16)exit_reason.basic,
						kvm_vmx_max_exit_handlers);
	if (!kvm_vmx_exit_handlers[exit_handler_index])
		goto unexpected_vmexit;

	return kvm_vmx_exit_handlers[exit_handler_index](vcpu);

unexpected_vmexit:
	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
		    exit_reason.full);
	dump_vmcs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
	vcpu->run->internal.data[0] = exit_reason.full;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
	return 0;
}
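/*
 * For illustration only (not part of the driver): a sketch of how a
 * userspace VMM might consume the kvm_run fields filled in above.  The
 * kvm_run layout is ABI; the fprintf and surrounding loop are hypothetical.
 *
 *	switch (run->exit_reason) {
 *	case KVM_EXIT_FAIL_ENTRY:
 *		fprintf(stderr, "entry failed, hw reason 0x%llx on CPU %u\n",
 *			run->fail_entry.hardware_entry_failure_reason,
 *			run->fail_entry.cpu);
 *		break;
 *	case KVM_EXIT_INTERNAL_ERROR:
 *		// run->internal.ndata valid words in run->internal.data[]
 *		break;
 *	}
 */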
static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
	int ret = __vmx_handle_exit(vcpu, exit_fastpath);

	/*
	 * Exit to user space when bus lock detected to inform that there is
	 * a bus lock in guest.
	 */
	if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
		if (ret > 0)
			vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;

		vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
		return 0;
	}
	return ret;
}
/*
 * Software based L1D cache flush which is used when microcode providing
 * the cache control MSR is not loaded.
 *
 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
 * flush it is required to read in 64 KiB because the replacement algorithm
 * is not exactly LRU. This could be sized at runtime via topology
 * information but as all relevant affected CPUs have 32KiB L1D cache size
 * there is no point in doing so.
 */
static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
	int size = PAGE_SIZE << L1D_CACHE_ORDER;

	/*
	 * This code is only executed when the flush mode is 'cond' or
	 * 'always'
	 */
	if (static_branch_likely(&vmx_l1d_flush_cond)) {
		bool flush_l1d;

		/*
		 * Clear the per-vcpu flush bit, it gets set again
		 * either from vcpu_run() or from one of the unsafe
		 * VMEXIT handlers.
		 */
		flush_l1d = vcpu->arch.l1tf_flush_l1d;
		vcpu->arch.l1tf_flush_l1d = false;

		/*
		 * Clear the per-cpu flush bit, it gets set again from
		 * the interrupt handlers.
		 */
		flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
		kvm_clear_cpu_l1tf_flush_l1d();

		if (!flush_l1d)
			return;
	}

	vcpu->stat.l1d_flush++;

	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
		return;
	}

	asm volatile(
		/* First ensure the pages are in the TLB */
		"xorl %%eax, %%eax\n"
		".Lpopulate_tlb:\n\t"
		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
		"addl $4096, %%eax\n\t"
		"cmpl %%eax, %[size]\n\t"
		"jne .Lpopulate_tlb\n\t"
		"xorl %%eax, %%eax\n\t"
		"cpuid\n\t"
		/* Now fill the cache */
		"xorl %%eax, %%eax\n"
		".Lfill_cache:\n"
		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
		"addl $64, %%eax\n\t"
		"cmpl %%eax, %[size]\n\t"
		"jne .Lfill_cache\n\t"
		"lfence\n"
		:: [flush_pages] "r" (vmx_l1d_flush_pages),
		    [size] "r" (size)
		: "eax", "ebx", "ecx", "edx");
}
static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	int tpr_threshold;

	if (is_guest_mode(vcpu) &&
	    nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return;

	tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr;
	if (is_guest_mode(vcpu))
		to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
	else
		vmcs_write32(TPR_THRESHOLD, tpr_threshold);
}
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 sec_exec_control;

	if (!lapic_in_kernel(vcpu))
		return;

	if (!flexpriority_enabled &&
	    !cpu_has_vmx_virtualize_x2apic_mode())
		return;

	/* Postpone execution until vmcs01 is the current VMCS. */
	if (is_guest_mode(vcpu)) {
		vmx->nested.change_vmcs01_virtual_apic_mode = true;
		return;
	}

	sec_exec_control = secondary_exec_controls_get(vmx);
	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);

	switch (kvm_get_apic_mode(vcpu)) {
	case LAPIC_MODE_INVALID:
		WARN_ONCE(true, "Invalid local APIC state");
		break;
	case LAPIC_MODE_DISABLED:
		break;
	case LAPIC_MODE_XAPIC:
		if (flexpriority_enabled) {
			sec_exec_control |=
				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
			kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

			/*
			 * Flush the TLB, reloading the APIC access page will
			 * only do so if its physical address has changed, but
			 * the guest may have inserted a non-APIC mapping into
			 * the TLB while the APIC access page was disabled.
			 */
			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
		}
		break;
	case LAPIC_MODE_X2APIC:
		if (cpu_has_vmx_virtualize_x2apic_mode())
			sec_exec_control |=
				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
		break;
	}
	secondary_exec_controls_set(vmx, sec_exec_control);

	vmx_update_msr_bitmap_x2apic(vcpu);
}
static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
{
	struct page *page;

	/* Defer reload until vmcs01 is the current VMCS. */
	if (is_guest_mode(vcpu)) {
		to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true;
		return;
	}

	if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		return;

	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (is_error_page(page))
		return;

	vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(page));
	vmx_flush_tlb_current(vcpu);

	/*
	 * Do not pin apic access page in memory, the MMU notifier
	 * will call us again if it is migrated or swapped out.
	 */
	put_page(page);
}
static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
{
	u16 status;
	u8 old;

	if (max_isr == -1)
		max_isr = 0;

	status = vmcs_read16(GUEST_INTR_STATUS);
	old = status >> 8;
	if (max_isr != old) {
		status &= 0xff;
		status |= max_isr << 8;
		vmcs_write16(GUEST_INTR_STATUS, status);
	}
}

static void vmx_set_rvi(int vector)
{
	u16 status;
	u8 old;

	if (vector == -1)
		vector = 0;

	status = vmcs_read16(GUEST_INTR_STATUS);
	old = (u8)status & 0xff;
	if ((u8)vector != old) {
		status &= ~0xff;
		status |= (u8)vector;
		vmcs_write16(GUEST_INTR_STATUS, status);
	}
}

static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
	/*
	 * When running L2, updating RVI is only relevant when
	 * vmcs12 virtual-interrupt-delivery enabled.
	 * However, it can be enabled only when L1 also
	 * intercepts external-interrupts and in that case
	 * we should not update vmcs02 RVI but instead intercept
	 * interrupt. Therefore, do nothing when running L2.
	 */
	if (!is_guest_mode(vcpu))
		vmx_set_rvi(max_irr);
}
static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int max_irr;
	bool max_irr_updated;

	if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm))
		return -EIO;

	if (pi_test_on(&vmx->pi_desc)) {
		pi_clear_on(&vmx->pi_desc);
		/*
		 * IOMMU can write to PID.ON, so the barrier matters even on UP.
		 * But on x86 this is just a compiler barrier anyway.
		 */
		smp_mb__after_atomic();
		max_irr_updated =
			kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);

		/*
		 * If we are running L2 and L1 has a new pending interrupt
		 * which can be injected, this may cause a vmexit or it may
		 * be injected into L2.  Either way, this interrupt will be
		 * processed via KVM_REQ_EVENT, not RVI, because we do not use
		 * virtual interrupt delivery to inject L1 interrupts into L2.
		 */
		if (is_guest_mode(vcpu) && max_irr_updated)
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else {
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	}
	vmx_hwapic_irr_update(vcpu, max_irr);
	return max_irr;
}
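/*
 * For reference, the flow above drains the posted-interrupt descriptor:
 * pi_desc.pir is a 256-bit bitmap with one bit per vector, which
 * kvm_apic_update_irr() ORs into the vAPIC IRR.  A device or the IOMMU
 * posts vector V roughly as (sketch only; send_notification_vector() is
 * a stand-in for the IPI the hardware sends, not a helper in this file):
 *
 *	set_bit(V, (unsigned long *)pi->pir);	// mark vector pending
 *	if (!pi_test_and_set_on(pi))		// notify only once
 *		send_notification_vector(pi->nv, pi->ndst);
 */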
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
	vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
	vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
}

static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	pi_clear_on(&vmx->pi_desc);
	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
}
void vmx_do_interrupt_nmi_irqoff(unsigned long entry);

static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
					unsigned long entry)
{
	kvm_before_interrupt(vcpu);
	vmx_do_interrupt_nmi_irqoff(entry);
	kvm_after_interrupt(vcpu);
}

static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
{
	const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
	u32 intr_info = vmx_get_intr_info(&vmx->vcpu);

	/* if exit due to PF check for async PF */
	if (is_page_fault(intr_info))
		vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
	/* Handle machine checks before interrupts are enabled */
	else if (is_machine_check(intr_info))
		kvm_machine_check();
	/* We need to handle NMIs before interrupts are enabled */
	else if (is_nmi(intr_info))
		handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
}

static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
{
	u32 intr_info = vmx_get_intr_info(vcpu);
	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
	gate_desc *desc = (gate_desc *)host_idt_base + vector;

	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
	    "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
		return;

	handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
}
static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->emulation_required)
		return;

	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
		handle_external_interrupt_irqoff(vcpu);
	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
		handle_exception_nmi_irqoff(vmx);
}
/*
 * The kvm parameter can be NULL (module initialization, or invocation before
 * VM creation). Be sure to check the kvm parameter before using it.
 */
static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
{
	switch (index) {
	case MSR_IA32_SMBASE:
		/*
		 * We cannot do SMM unless we can run the guest in big
		 * real mode.
		 */
		return enable_unrestricted_guest || emulate_invalid_guest_state;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		return nested;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		/* This is AMD only.  */
		return false;
	default:
		return true;
	}
}
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
	u32 exit_intr_info;
	bool unblock_nmi;
	u8 vector;
	bool idtv_info_valid;

	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;

	if (enable_vnmi) {
		if (vmx->loaded_vmcs->nmi_known_unmasked)
			return;

		exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
		 * a guest IRET fault.
		 * SDM 3: 23.2.2 (September 2008)
		 * Bit 12 is undefined in any of the following cases:
		 *  If the VM exit sets the valid bit in the IDT-vectoring
		 *   information field.
		 *  If the VM exit is due to a double fault.
		 */
		if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
		    vector != DF_VECTOR && !idtv_info_valid)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
		else
			vmx->loaded_vmcs->nmi_known_unmasked =
				!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
				  & GUEST_INTR_STATE_NMI);
	} else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
		vmx->loaded_vmcs->vnmi_blocked_time +=
			ktime_to_ns(ktime_sub(ktime_get(),
					      vmx->loaded_vmcs->entry_time));
}
static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
				      u32 idt_vectoring_info,
				      int instr_len_field,
				      int error_code_field)
{
	u8 vector;
	int type;
	bool idtv_info_valid;

	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;

	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	if (!idtv_info_valid)
		return;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;

	switch (type) {
	case INTR_TYPE_NMI_INTR:
		vcpu->arch.nmi_injected = true;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Clear bit "block by NMI" before VM entry if a NMI
		 * delivery faulted.
		 */
		vmx_set_nmi_mask(vcpu, false);
		break;
	case INTR_TYPE_SOFT_EXCEPTION:
		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
		fallthrough;
	case INTR_TYPE_HARD_EXCEPTION:
		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
			u32 err = vmcs_read32(error_code_field);
			kvm_requeue_exception_e(vcpu, vector, err);
		} else
			kvm_requeue_exception(vcpu, vector);
		break;
	case INTR_TYPE_SOFT_INTR:
		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
		fallthrough;
	case INTR_TYPE_EXT_INTR:
		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
		break;
	default:
		break;
	}
}

static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
				  VM_EXIT_INSTRUCTION_LEN,
				  IDT_VECTORING_ERROR_CODE);
}

static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
{
	__vmx_complete_interrupts(vcpu,
				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
				  VM_ENTRY_INSTRUCTION_LEN,
				  VM_ENTRY_EXCEPTION_ERROR_CODE);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}
static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
{
	int i, nr_msrs;
	struct perf_guest_switch_msr *msrs;

	/* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
	msrs = perf_guest_get_msrs(&nr_msrs);
	if (!msrs)
		return;

	for (i = 0; i < nr_msrs; i++)
		if (msrs[i].host == msrs[i].guest)
			clear_atomic_switch_msr(vmx, msrs[i].msr);
		else
			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
					msrs[i].host, false);
}
static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 tscl;
	u32 delta_tsc;

	if (vmx->req_immediate_exit) {
		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
	} else if (vmx->hv_deadline_tsc != -1) {
		tscl = rdtsc();
		if (vmx->hv_deadline_tsc > tscl)
			/* set_hv_timer ensures the delta fits in 32-bits */
			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
				cpu_preemption_timer_multi);
		else
			delta_tsc = 0;

		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
	} else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1);
		vmx->loaded_vmcs->hv_timer_soft_disabled = true;
	}
}
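/*
 * Worked example for the shift above: the VMX preemption timer counts down
 * at the TSC rate divided by 2^cpu_preemption_timer_multi (bits 4:0 of
 * MSR_IA32_VMX_MISC).  Assuming a hypothetical rate multiplier of 5 and a
 * deadline 1,000,000 TSC cycles away:
 *
 *	delta_tsc = 1000000 >> 5 = 31250 timer ticks
 *
 * The truncation loses at most 2^5 - 1 TSC cycles, and vmx_set_hv_timer()
 * has already guaranteed that the shifted delta fits in the 32-bit
 * VMX_PREEMPTION_TIMER_VALUE field.
 */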
void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
{
	if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
		vmx->loaded_vmcs->host_state.rsp = host_rsp;
		vmcs_writel(HOST_RSP, host_rsp);
	}
}

static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	switch (to_vmx(vcpu)->exit_reason.basic) {
	case EXIT_REASON_MSR_WRITE:
		return handle_fastpath_set_msr_irqoff(vcpu);
	case EXIT_REASON_PREEMPTION_TIMER:
		return handle_fastpath_preemption_timer(vcpu);
	default:
		return EXIT_FASTPATH_NONE;
	}
}
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
					struct vcpu_vmx *vmx)
{
	kvm_guest_enter_irqoff();

	/* L1D Flush includes CPU buffer clear to mitigate MDS */
	if (static_branch_unlikely(&vmx_l1d_should_flush))
		vmx_l1d_flush(vcpu);
	else if (static_branch_unlikely(&mds_user_clear))
		mds_clear_cpu_buffers();

	if (vcpu->arch.cr2 != native_read_cr2())
		native_write_cr2(vcpu->arch.cr2);

	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				   vmx->loaded_vmcs->launched);

	vcpu->arch.cr2 = native_read_cr2();

	kvm_guest_exit_irqoff();
}
static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long cr3, cr4;

	/* Record the guest's net vcpu time for enforced NMI injections. */
	if (unlikely(!enable_vnmi &&
		     vmx->loaded_vmcs->soft_vnmi_blocked))
		vmx->loaded_vmcs->entry_time = ktime_get();

	/*
	 * Don't enter VMX if guest state is invalid, let the exit handler
	 * start emulation until we arrive back to a valid state.  Synthesize a
	 * consistency check VM-Exit due to invalid guest state and bail.
	 */
	if (unlikely(vmx->emulation_required)) {

		/* We don't emulate invalid state of a nested guest */
		vmx->fail = is_guest_mode(vcpu);

		vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
		vmx->exit_reason.failed_vmentry = 1;
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = 0;
		return EXIT_FASTPATH_NONE;
	}

	trace_kvm_entry(vcpu);

	if (vmx->ple_window_dirty) {
		vmx->ple_window_dirty = false;
		vmcs_write32(PLE_WINDOW, vmx->ple_window);
	}

	/*
	 * We did this in prepare_switch_to_guest, because it needs to
	 * be within srcu_read_lock.
	 */
	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);

	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);

	cr3 = __get_current_cr3_fast();
	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
		vmcs_writel(HOST_CR3, cr3);
		vmx->loaded_vmcs->host_state.cr3 = cr3;
	}

	cr4 = cr4_read_shadow();
	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
		vmcs_writel(HOST_CR4, cr4);
		vmx->loaded_vmcs->host_state.cr4 = cr4;
	}

	/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
		set_debugreg(vcpu->arch.dr6, 6);

	/* When single-stepping over STI and MOV SS, we must clear the
	 * corresponding interruptibility bits in the guest state. Otherwise
	 * vmentry fails as it then expects bit 14 (BS) in pending debug
	 * exceptions being set, but that's not correct for the guest debugging
	 * case. */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vmx_set_interrupt_shadow(vcpu, 0);

	kvm_load_guest_xsave_state(vcpu);

	pt_guest_enter(vmx);

	atomic_switch_perf_msrs(vmx);
	if (intel_pmu_lbr_is_enabled(vcpu))
		vmx_passthrough_lbr_msrs(vcpu);

	if (enable_preemption_timer)
		vmx_update_hv_timer(vcpu);

	kvm_wait_lapic_expire(vcpu);

	/*
	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
	 * is no need to worry about the conditional branch over the wrmsr
	 * being speculatively taken.
	 */
	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);

	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
	vmx_vcpu_enter_exit(vcpu, vmx);

	/*
	 * We do not use IBRS in the kernel. If this vCPU has used the
	 * SPEC_CTRL MSR it may have left it on; save the value and
	 * turn it off. This is much more efficient than blindly adding
	 * it to the atomic save/restore list. Especially as the former
	 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
	 *
	 * For non-nested case:
	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 *
	 * For nested case:
	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 */
	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);

	/* All fields are clean at this point */
	if (static_branch_unlikely(&enable_evmcs)) {
		current_evmcs->hv_clean_fields |=
			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;

		current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
	}

	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
	if (vmx->host_debugctlmsr)
		update_debugctlmsr(vmx->host_debugctlmsr);

#ifndef CONFIG_X86_64
	/*
	 * The sysexit path does not restore ds/es, so we must set them to
	 * a reasonable value ourselves.
	 *
	 * We can't defer this to vmx_prepare_switch_to_host() since that
	 * function may be executed in interrupt context, which saves and
	 * restores segments around it, nullifying its effect.
	 */
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);
#endif

	vmx_register_cache_reset(vcpu);

	pt_guest_exit(vmx);

	kvm_load_host_xsave_state(vcpu);

	if (is_guest_mode(vcpu)) {
		/*
		 * Track VMLAUNCH/VMRESUME that have made past guest state
		 * checking.
		 */
		if (vmx->nested.nested_run_pending &&
		    !vmx->exit_reason.failed_vmentry)
			++vcpu->stat.nested_run;

		vmx->nested.nested_run_pending = 0;
	}

	vmx->idt_vectoring_info = 0;

	if (unlikely(vmx->fail)) {
		vmx->exit_reason.full = 0xdead;
		return EXIT_FASTPATH_NONE;
	}

	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
		kvm_machine_check();

	if (likely(!vmx->exit_reason.failed_vmentry))
		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

	trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);

	if (unlikely(vmx->exit_reason.failed_vmentry))
		return EXIT_FASTPATH_NONE;

	vmx->loaded_vmcs->launched = 1;

	vmx_recover_nmi_blocking(vmx);
	vmx_complete_interrupts(vmx);

	if (is_guest_mode(vcpu))
		return EXIT_FASTPATH_NONE;

	return vmx_exit_handlers_fastpath(vcpu);
}
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (enable_pml)
		vmx_destroy_pml_buffer(vmx);
	free_vpid(vmx->vpid);
	nested_vmx_free_vcpu(vcpu);
	free_loaded_vmcs(vmx->loaded_vmcs);
}
static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct vmx_uret_msr *tsx_ctrl;
	struct vcpu_vmx *vmx;
	int i, cpu, err;

	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
	vmx = to_vmx(vcpu);

	err = -ENOMEM;

	vmx->vpid = allocate_vpid();

	/*
	 * If PML is turned on, failure on enabling PML just results in failure
	 * of creating the vcpu, therefore we can simplify PML logic (by
	 * avoiding dealing with cases, such as enabling PML partially on vcpus
	 * for the guest), etc.
	 */
	if (enable_pml) {
		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
		if (!vmx->pml_pg)
			goto free_vpid;
	}

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		vmx->guest_uret_msrs[i].data = 0;
		vmx->guest_uret_msrs[i].mask = -1ull;
	}
	if (boot_cpu_has(X86_FEATURE_RTM)) {
		/*
		 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
		 * Keep the host value unchanged to avoid changing CPUID bits
		 * under the host kernel's feet.
		 */
		tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
		if (tsx_ctrl)
			tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
	}

	err = alloc_loaded_vmcs(&vmx->vmcs01);
	if (err < 0)
		goto free_pml;

	/* The MSR bitmap starts with all ones */
	bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);

	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
#ifdef CONFIG_X86_64
	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif
	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
	if (kvm_cstate_in_guest(vcpu->kvm)) {
		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
	}

	vmx->loaded_vmcs = &vmx->vmcs01;
	cpu = get_cpu();
	vmx_vcpu_load(vcpu, cpu);
	vcpu->cpu = cpu;
	init_vmcs(vmx);
	vmx_vcpu_put(vcpu);
	put_cpu();
	if (cpu_need_virtualize_apic_accesses(vcpu)) {
		err = alloc_apic_access_page(vcpu->kvm);
		if (err)
			goto free_vmcs;
	}

	if (enable_ept && !enable_unrestricted_guest) {
		err = init_rmode_identity_map(vcpu->kvm);
		if (err)
			goto free_vmcs;
	}

	if (nested)
		memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
	else
		memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));

	vcpu_setup_sgx_lepubkeyhash(vcpu);

	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;

	vcpu->arch.microcode_version = 0x100000000ULL;
	vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;

	/*
	 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
	 * or POSTED_INTR_WAKEUP_VECTOR.
	 */
	vmx->pi_desc.nv = POSTED_INTR_VECTOR;
	vmx->pi_desc.sn = 1;

	return 0;

free_vmcs:
	free_loaded_vmcs(vmx->loaded_vmcs);
free_pml:
	vmx_destroy_pml_buffer(vmx);
free_vpid:
	free_vpid(vmx->vpid);
	return err;
}
#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"

static int vmx_vm_init(struct kvm *kvm)
{
	if (!ple_gap)
		kvm->arch.pause_in_guest = true;

	if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
		switch (l1tf_mitigation) {
		case L1TF_MITIGATION_OFF:
		case L1TF_MITIGATION_FLUSH_NOWARN:
			/* 'I explicitly don't care' is set */
			break;
		case L1TF_MITIGATION_FLUSH:
		case L1TF_MITIGATION_FLUSH_NOSMT:
		case L1TF_MITIGATION_FULL:
			/*
			 * Warn upon starting the first VM in a potentially
			 * insecure environment.
			 */
			if (sched_smt_active())
				pr_warn_once(L1TF_MSG_SMT);
			if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
				pr_warn_once(L1TF_MSG_L1D);
			break;
		case L1TF_MITIGATION_FULL_FORCE:
			/* Flush is enforced */
			break;
		}
	}
	return 0;
}
static int __init vmx_check_processor_compat(void)
{
	struct vmcs_config vmcs_conf;
	struct vmx_capability vmx_cap;

	if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	    !this_cpu_has(X86_FEATURE_VMX)) {
		pr_err("kvm: VMX is disabled on CPU %d\n", smp_processor_id());
		return -EIO;
	}

	if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
		return -EIO;
	if (nested)
		nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
				smp_processor_id());
		return -EIO;
	}
	return 0;
}
static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	u8 cache;
	u64 ipat = 0;

	/* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
	 * memory aliases with conflicting memory types and sometimes MCEs.
	 * We have to be careful as to what are honored and when.
	 *
	 * For MMIO, guest CD/MTRR are ignored.  The EPT memory type is set to
	 * UC.  The effective memory type is UC or WC depending on guest PAT.
	 * This was historically the source of MCEs and we want to be careful.
	 *
	 * When there is no need to deal with noncoherent DMA (e.g., no VT-d
	 * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored.  The
	 * EPT memory type is set to WB.  The effective memory type is forced
	 * WB.
	 *
	 * Otherwise, we trust guest.  Guest CD/MTRR/PAT are all honored.  The
	 * EPT memory type is used to emulate guest CD/MTRR.
	 */

	if (is_mmio) {
		cache = MTRR_TYPE_UNCACHABLE;
		goto exit;
	}

	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
		ipat = VMX_EPT_IPAT_BIT;
		cache = MTRR_TYPE_WRBACK;
		goto exit;
	}

	if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
		ipat = VMX_EPT_IPAT_BIT;
		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
			cache = MTRR_TYPE_WRBACK;
		else
			cache = MTRR_TYPE_UNCACHABLE;
		goto exit;
	}

	cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);

exit:
	return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
}
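/*
 * Worked example of the EPTE encoding above: VMX_EPT_MT_EPTE_SHIFT is 3, so
 * the memory type lands in EPTE bits 5:3, and VMX_EPT_IPAT_BIT (bit 6) tells
 * the CPU to ignore guest PAT.  For guest RAM with no noncoherent DMA:
 *
 *	(MTRR_TYPE_WRBACK << 3) | VMX_EPT_IPAT_BIT = (6 << 3) | (1 << 6)
 *						   = 0x30 | 0x40 = 0x70
 *
 * i.e. forced writeback.  For MMIO the result is (0 << 3) | 0 = 0: UC, with
 * guest PAT still honored.
 */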
static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
{
	/*
	 * These bits in the secondary execution controls field
	 * are dynamic, the others are mostly based on the hypervisor
	 * architecture and the guest's CPUID.  Do not touch the
	 * dynamic bits.
	 */
	u32 mask =
		SECONDARY_EXEC_SHADOW_VMCS |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
		SECONDARY_EXEC_DESC;

	u32 cur_ctl = secondary_exec_controls_get(vmx);

	secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
}
/*
 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
 * (indicating "allowed-1") if they are supported in the guest's CPUID.
 */
static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_cpuid_entry2 *entry;

	vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
	vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;

#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do {		\
	if (entry && (entry->_reg & (_cpuid_mask)))			\
		vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);		\
} while (0)

	entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	cr4_fixed1_update(X86_CR4_VME,        edx, feature_bit(VME));
	cr4_fixed1_update(X86_CR4_PVI,        edx, feature_bit(VME));
	cr4_fixed1_update(X86_CR4_TSD,        edx, feature_bit(TSC));
	cr4_fixed1_update(X86_CR4_DE,         edx, feature_bit(DE));
	cr4_fixed1_update(X86_CR4_PSE,        edx, feature_bit(PSE));
	cr4_fixed1_update(X86_CR4_PAE,        edx, feature_bit(PAE));
	cr4_fixed1_update(X86_CR4_MCE,        edx, feature_bit(MCE));
	cr4_fixed1_update(X86_CR4_PGE,        edx, feature_bit(PGE));
	cr4_fixed1_update(X86_CR4_OSFXSR,     edx, feature_bit(FXSR));
	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM));
	cr4_fixed1_update(X86_CR4_VMXE,       ecx, feature_bit(VMX));
	cr4_fixed1_update(X86_CR4_SMXE,       ecx, feature_bit(SMX));
	cr4_fixed1_update(X86_CR4_PCIDE,      ecx, feature_bit(PCID));
	cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, feature_bit(XSAVE));

	entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
	cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, feature_bit(FSGSBASE));
	cr4_fixed1_update(X86_CR4_SMEP,       ebx, feature_bit(SMEP));
	cr4_fixed1_update(X86_CR4_SMAP,       ebx, feature_bit(SMAP));
	cr4_fixed1_update(X86_CR4_PKE,        ecx, feature_bit(PKU));
	cr4_fixed1_update(X86_CR4_UMIP,       ecx, feature_bit(UMIP));
	cr4_fixed1_update(X86_CR4_LA57,       ecx, feature_bit(LA57));

#undef cr4_fixed1_update
}
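/*
 * Example of the effect: if the vCPU's CPUID hides PCID, the update above
 * leaves X86_CR4_PCIDE clear in cr4_fixed1.  When L1 (in VMX operation)
 * later tries to set CR4.PCIDE, the nested CR4 validity checks reject the
 * value, matching what hardware advertises via MSR_IA32_VMX_CR4_FIXED1.
 */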
static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (kvm_mpx_supported()) {
		bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);

		if (mpx_enabled) {
			vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
			vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
		} else {
			vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
			vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
		}
	}
}
static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_cpuid_entry2 *best = NULL;
	int i;

	for (i = 0; i < PT_CPUID_LEAVES; i++) {
		best = kvm_find_cpuid_entry(vcpu, 0x14, i);
		if (!best)
			return;
		vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
		vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
		vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
		vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
	}

	/* Get the number of configurable Address Ranges for filtering */
	vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps,
						PT_CAP_num_address_ranges);

	/* Initialize and clear the no dependency bits */
	vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
			RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC);

	/*
	 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise
	 * will inject an #GP
	 */
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;

	/*
	 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
	 * PSBFreq can be set
	 */
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
				RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);

	/*
	 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn BranchEn and
	 * MTCFreq can be set
	 */
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
				RTIT_CTL_BRANCH_EN | RTIT_CTL_MTC_RANGE);

	/* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
						RTIT_CTL_PTW_EN);

	/* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;

	/* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;

	/* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;

	/* unmask address range configure area */
	for (i = 0; i < vmx->pt_desc.addr_range; i++)
		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
}
static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */
	vcpu->arch.xsaves_enabled = false;

	vmx_setup_uret_msrs(vmx);

	if (cpu_has_secondary_exec_ctrls())
		vmcs_set_secondary_exec_control(vmx,
						vmx_secondary_exec_control(vmx));

	if (nested_vmx_allowed(vcpu))
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
			FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
			FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
	else
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
			~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
			  FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);

	if (nested_vmx_allowed(vcpu)) {
		nested_vmx_cr_fixed1_bits_update(vcpu);
		nested_vmx_entry_exit_ctls_update(vcpu);
	}

	if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
			guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
		update_intel_pt_cfg(vcpu);

	if (boot_cpu_has(X86_FEATURE_RTM)) {
		struct vmx_uret_msr *msr;
		msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
		if (msr) {
			bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
			vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
		}
	}

	set_cr4_guest_host_mask(vmx);

	vmx_write_encls_bitmap(vcpu, NULL);
	if (guest_cpuid_has(vcpu, X86_FEATURE_SGX))
		vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
	else
		vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;

	if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
		vmx->msr_ia32_feature_control_valid_bits |=
			FEAT_CTL_SGX_LC_ENABLED;
	else
		vmx->msr_ia32_feature_control_valid_bits &=
			~FEAT_CTL_SGX_LC_ENABLED;

	/* Refresh #PF interception to account for MAXPHYADDR changes. */
	vmx_update_exception_bitmap(vcpu);
}
static __init void vmx_set_cpu_caps(void)
{
	kvm_set_cpu_caps();

	/* CPUID 0x1 */
	if (nested)
		kvm_cpu_cap_set(X86_FEATURE_VMX);

	/* CPUID 0x7 */
	if (kvm_mpx_supported())
		kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
	if (!cpu_has_vmx_invpcid())
		kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
	if (vmx_pt_mode_is_host_guest())
		kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);

	if (!enable_sgx) {
		kvm_cpu_cap_clear(X86_FEATURE_SGX);
		kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
		kvm_cpu_cap_clear(X86_FEATURE_SGX1);
		kvm_cpu_cap_clear(X86_FEATURE_SGX2);
	}

	if (vmx_umip_emulated())
		kvm_cpu_cap_set(X86_FEATURE_UMIP);

	/* CPUID 0xD.1 */
	supported_xss = 0;
	if (!cpu_has_vmx_xsaves())
		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);

	/* CPUID 0x80000001 and 0x7 (RDPID) */
	if (!cpu_has_vmx_rdtscp()) {
		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}

	if (cpu_has_vmx_waitpkg())
		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
}
static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
{
	to_vmx(vcpu)->req_immediate_exit = true;
}
static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
				  struct x86_instruction_info *info)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned short port;
	bool intercept;
	int size;

	if (info->intercept == x86_intercept_in ||
	    info->intercept == x86_intercept_ins) {
		port = info->src_val;
		size = info->dst_bytes;
	} else {
		port = info->dst_val;
		size = info->src_bytes;
	}

	/*
	 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
	 * VM-exits depend on the 'unconditional IO exiting' VM-execution
	 * control.
	 *
	 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
	 */
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		intercept = nested_cpu_has(vmcs12,
					   CPU_BASED_UNCOND_IO_EXITING);
	else
		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);

	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}
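/*
 * Example: per the SDM, I/O bitmap A covers ports 0x0000-0x7fff and bitmap
 * B covers 0x8000-0xffff, one bit per port.  So for a two-byte access at
 * port 0x70, nested_vmx_check_io_bitmaps() effectively tests bits 0x70 and
 * 0x71 of bitmap A and reports an intercept if either is set.
 */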
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	switch (info->intercept) {
	/*
	 * RDPID causes #UD if disabled through secondary execution controls.
	 * Because it is marked as EmulateOnUD, we need to intercept it here.
	 * Note, RDPID is hidden behind ENABLE_RDTSCP.
	 */
	case x86_intercept_rdpid:
		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
			exception->vector = UD_VECTOR;
			exception->error_code_valid = false;
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;

	case x86_intercept_in:
	case x86_intercept_ins:
	case x86_intercept_out:
	case x86_intercept_outs:
		return vmx_check_intercept_io(vcpu, info);

	case x86_intercept_lgdt:
	case x86_intercept_lidt:
	case x86_intercept_lldt:
	case x86_intercept_ltr:
	case x86_intercept_sgdt:
	case x86_intercept_sidt:
	case x86_intercept_sldt:
	case x86_intercept_str:
		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
			return X86EMUL_CONTINUE;

		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
		break;

	/* TODO: check more intercepts... */
	default:
		break;
	}

	return X86EMUL_UNHANDLEABLE;
}
#ifdef CONFIG_X86_64
/* (a << shift) / divisor, return 1 if overflow otherwise 0 */
static inline int u64_shl_div_u64(u64 a, unsigned int shift,
				  u64 divisor, u64 *result)
{
	u64 low = a << shift, high = a >> (64 - shift);

	/* To avoid the overflow on divq */
	if (high >= divisor)
		return 1;

	/* Low hold the result, high hold rem which is discarded */
	asm("divq %2\n\t" : "=a" (low), "=d" (high) :
	    "rm" (divisor), "0" (low), "1" (high));
	*result = low;

	return 0;
}
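/*
 * Worked example: with kvm_tsc_scaling_ratio_frac_bits == 48 on VMX, the
 * caller below computes host_tsc = (guest_tsc << 48) / l1_tsc_scaling_ratio.
 * For a guest scaled to half the host TSC rate the ratio is 2^47, so a
 * guest delta of 1000 cycles yields (1000 << 48) / 2^47 = 2000 host cycles.
 * The "high >= divisor" check rejects quotients that would not fit in
 * 64 bits, which divq would otherwise turn into a divide fault.
 */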
static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired)
{
	struct vcpu_vmx *vmx;
	u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
	struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;

	vmx = to_vmx(vcpu);
	tscl = rdtsc();
	guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
	delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
	lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
						    ktimer->timer_advance_ns);

	if (delta_tsc > lapic_timer_advance_cycles)
		delta_tsc -= lapic_timer_advance_cycles;
	else
		delta_tsc = 0;

	/* Convert to host delta tsc if tsc scaling is enabled */
	if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
	    delta_tsc && u64_shl_div_u64(delta_tsc,
				kvm_tsc_scaling_ratio_frac_bits,
				vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
		return -ERANGE;

	/*
	 * If the delta tsc can't fit in the 32 bit after the multi shift,
	 * we can't use the preemption timer.
	 * It's possible that it fits on later vmentries, but checking
	 * on every vmentry is costly so we just use an hrtimer.
	 */
	if (delta_tsc >> (cpu_preemption_timer_multi + 32))
		return -ERANGE;

	vmx->hv_deadline_tsc = tscl + delta_tsc;
	*expired = !delta_tsc;
	return 0;
}

static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
	to_vmx(vcpu)->hv_deadline_tsc = -1;
}
#endif

static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	if (!kvm_pause_in_guest(vcpu->kvm))
		shrink_ple_window(vcpu);
}
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (is_guest_mode(vcpu)) {
		vmx->nested.update_vmcs01_cpu_dirty_logging = true;
		return;
	}

	/*
	 * Note, cpu_dirty_logging_count can be changed concurrent with this
	 * code, but in that case another update request will be made and so
	 * the guest will never run with a stale PML value.
	 */
	if (vcpu->kvm->arch.cpu_dirty_logging_count)
		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
	else
		secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
}

static int vmx_pre_block(struct kvm_vcpu *vcpu)
{
	if (pi_pre_block(vcpu))
		return 1;

	if (kvm_lapic_hv_timer_in_use(vcpu))
		kvm_lapic_switch_to_sw_timer(vcpu);

	return 0;
}

static void vmx_post_block(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops.set_hv_timer)
		kvm_lapic_switch_to_hv_timer(vcpu);

	pi_post_block(vcpu);
}
static void vmx_setup_mce(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
			FEAT_CTL_LMCE_ENABLED;
	else
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
			~FEAT_CTL_LMCE_ENABLED;
}

static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	/* we need a nested vmexit to enter SMM, postpone if run is pending */
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return -EBUSY;
	return !is_smm(vcpu);
}

static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
	if (vmx->nested.smm.guest_mode)
		nested_vmx_vmexit(vcpu, -1, 0, 0);

	vmx->nested.smm.vmxon = vmx->nested.vmxon;
	vmx->nested.vmxon = false;
	vmx_clear_hlt(vcpu);
	return 0;
}

static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int ret;

	if (vmx->nested.smm.vmxon) {
		vmx->nested.vmxon = true;
		vmx->nested.smm.vmxon = false;
	}

	if (vmx->nested.smm.guest_mode) {
		ret = nested_vmx_enter_non_root_mode(vcpu, false);
		if (ret)
			return ret;

		vmx->nested.smm.guest_mode = false;
	}
	return 0;
}

static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
{
	/* RSM will cause a vmexit anyway.  */
}

static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
}
static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu)) {
		struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;

		if (hrtimer_try_to_cancel(timer) == 1)
			hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
}

static void hardware_unsetup(void)
{
	if (nested)
		nested_vmx_hardware_unsetup();

	free_kvm_area();
}

static bool vmx_check_apicv_inhibit_reasons(ulong bit)
{
	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
			  BIT(APICV_INHIBIT_REASON_HYPERV);

	return supported & BIT(bit);
}
static struct kvm_x86_ops vmx_x86_ops __initdata = {
	.hardware_unsetup = hardware_unsetup,

	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,
	.cpu_has_accelerated_tpr = report_flexpriority,
	.has_emulated_msr = vmx_has_emulated_msr,

	.vm_size = sizeof(struct kvm_vmx),
	.vm_init = vmx_vm_init,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,
	.vcpu_reset = vmx_vcpu_reset,

	.prepare_guest_switch = vmx_prepare_switch_to_guest,
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

	.update_exception_bitmap = vmx_update_exception_bitmap,
	.get_msr_feature = vmx_get_msr_feature,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cpl = vmx_get_cpl,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.set_cr0 = vmx_set_cr0,
	.is_valid_cr4 = vmx_is_valid_cr4,
	.set_cr4 = vmx_set_cr4,
	.set_efer = vmx_set_efer,
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.set_dr7 = vmx_set_dr7,
	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
	.cache_reg = vmx_cache_reg,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush_all = vmx_flush_tlb_all,
	.tlb_flush_current = vmx_flush_tlb_current,
	.tlb_flush_gva = vmx_flush_tlb_gva,
	.tlb_flush_guest = vmx_flush_tlb_guest,

	.run = vmx_vcpu_run,
	.handle_exit = vmx_handle_exit,
	.skip_emulated_instruction = vmx_skip_emulated_instruction,
	.update_emulated_instruction = vmx_update_emulated_instruction,
	.set_interrupt_shadow = vmx_set_interrupt_shadow,
	.get_interrupt_shadow = vmx_get_interrupt_shadow,
	.patch_hypercall = vmx_patch_hypercall,
	.set_irq = vmx_inject_irq,
	.set_nmi = vmx_inject_nmi,
	.queue_exception = vmx_queue_exception,
	.cancel_injection = vmx_cancel_injection,
	.interrupt_allowed = vmx_interrupt_allowed,
	.nmi_allowed = vmx_nmi_allowed,
	.get_nmi_mask = vmx_get_nmi_mask,
	.set_nmi_mask = vmx_set_nmi_mask,
	.enable_nmi_window = vmx_enable_nmi_window,
	.enable_irq_window = vmx_enable_irq_window,
	.update_cr8_intercept = vmx_update_cr8_intercept,
	.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
	.load_eoi_exitmap = vmx_load_eoi_exitmap,
	.apicv_post_state_restore = vmx_apicv_post_state_restore,
	.check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
	.hwapic_irr_update = vmx_hwapic_irr_update,
	.hwapic_isr_update = vmx_hwapic_isr_update,
	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
	.sync_pir_to_irr = vmx_sync_pir_to_irr,
	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,

	.set_tss_addr = vmx_set_tss_addr,
	.set_identity_map_addr = vmx_set_identity_map_addr,
	.get_mt_mask = vmx_get_mt_mask,

	.get_exit_info = vmx_get_exit_info,

	.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,

	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,

	.get_l2_tsc_offset = vmx_get_l2_tsc_offset,
	.get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
	.write_tsc_offset = vmx_write_tsc_offset,
	.write_tsc_multiplier = vmx_write_tsc_multiplier,

	.load_mmu_pgd = vmx_load_mmu_pgd,

	.check_intercept = vmx_check_intercept,
	.handle_exit_irqoff = vmx_handle_exit_irqoff,

	.request_immediate_exit = vmx_request_immediate_exit,

	.sched_in = vmx_sched_in,

	.cpu_dirty_log_size = PML_ENTITY_NUM,
	.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,

	.pre_block = vmx_pre_block,
	.post_block = vmx_post_block,

	.pmu_ops = &intel_pmu_ops,
	.nested_ops = &vmx_nested_ops,

	.update_pi_irte = pi_update_irte,
	.start_assignment = vmx_pi_start_assignment,

#ifdef CONFIG_X86_64
	.set_hv_timer = vmx_set_hv_timer,
	.cancel_hv_timer = vmx_cancel_hv_timer,
#endif

	.setup_mce = vmx_setup_mce,

	.smi_allowed = vmx_smi_allowed,
	.enter_smm = vmx_enter_smm,
	.leave_smm = vmx_leave_smm,
	.enable_smi_window = vmx_enable_smi_window,

	.can_emulate_instruction = vmx_can_emulate_instruction,
	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
	.migrate_timers = vmx_migrate_timers,

	.msr_filter_changed = vmx_msr_filter_changed,
	.complete_emulated_msr = kvm_complete_insn_gp,

	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
};
static __init void vmx_setup_user_return_msrs(void)
{
	/*
	 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
	 * will emulate SYSCALL in legacy mode if the vendor string in guest
	 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
	 * support this emulation, MSR_STAR is included in the list for i386,
	 * but is never loaded into hardware.  MSR_CSTAR is also never loaded
	 * into hardware and is here purely for emulation purposes.
	 */
	const u32 vmx_uret_msrs_list[] = {
	#ifdef CONFIG_X86_64
		MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
	#endif
		MSR_EFER, MSR_TSC_AUX, MSR_STAR,
		MSR_IA32_TSX_CTRL,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);

	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
		kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
}
static __init int hardware_setup(void)
{
	unsigned long host_bndcfgs;
	struct desc_ptr dt;
	int r, ept_lpage_level;

	store_idt(&dt);
	host_idt_base = dt.address;

	vmx_setup_user_return_msrs();

	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_MPX)) {
		rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
	}

	if (!cpu_has_vmx_mpx())
		supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
				    XFEATURE_MASK_BNDCSR);

	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
		enable_vpid = 0;

	if (!cpu_has_vmx_ept() ||
	    !cpu_has_vmx_ept_4levels() ||
	    !cpu_has_vmx_ept_mt_wb() ||
	    !cpu_has_vmx_invept_global())
		enable_ept = 0;

	/* NX support is required for shadow paging. */
	if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
		pr_err_ratelimited("kvm: NX (Execute Disable) not supported\n");
		return -EOPNOTSUPP;
	}

	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
		enable_ept_ad_bits = 0;

	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	if (!cpu_has_virtual_nmis())
		enable_vnmi = 0;

	/*
	 * set_apic_access_page_addr() is used to reload apic access
	 * page upon invalidation.  No need to do anything if not
	 * using the APIC_ACCESS_ADDR VMCS field.
	 */
	if (!flexpriority_enabled)
		vmx_x86_ops.set_apic_access_page_addr = NULL;

	if (!cpu_has_vmx_tpr_shadow())
		vmx_x86_ops.update_cr8_intercept = NULL;

#if IS_ENABLED(CONFIG_HYPERV)
	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
	    && enable_ept) {
		vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
		vmx_x86_ops.tlb_remote_flush_with_range =
				hv_remote_flush_tlb_with_range;
	}
#endif

	if (!cpu_has_vmx_ple()) {
		ple_gap = 0;
		ple_window = 0;
		ple_window_grow = 0;
		ple_window_max = 0;
		ple_window_shrink = 0;
	}

	if (!cpu_has_vmx_apicv()) {
		enable_apicv = 0;
		vmx_x86_ops.sync_pir_to_irr = NULL;
	}

	if (cpu_has_vmx_tsc_scaling()) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 48;
	}

	kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	if (enable_ept)
		kvm_mmu_set_ept_masks(enable_ept_ad_bits,
				      cpu_has_vmx_ept_execute_only());

	if (!enable_ept)
		ept_lpage_level = 0;
	else if (cpu_has_vmx_ept_1g_page())
		ept_lpage_level = PG_LEVEL_1G;
	else if (cpu_has_vmx_ept_2m_page())
		ept_lpage_level = PG_LEVEL_2M;
	else
		ept_lpage_level = PG_LEVEL_4K;
	kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
			  ept_lpage_level);

	/*
	 * Only enable PML when hardware supports PML feature, and both EPT
	 * and EPT A/D bit features are enabled -- PML depends on them to work.
	 */
	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
		enable_pml = 0;

	if (!enable_pml)
		vmx_x86_ops.cpu_dirty_log_size = 0;

	if (!cpu_has_vmx_preemption_timer())
		enable_preemption_timer = false;

	if (enable_preemption_timer) {
		u64 use_timer_freq = 5000ULL * 1000 * 1000;
		u64 vmx_msr;

		rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
		cpu_preemption_timer_multi =
			vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;

		if (tsc_khz)
			use_timer_freq = (u64)tsc_khz * 1000;
		use_timer_freq >>= cpu_preemption_timer_multi;

		/*
		 * KVM "disables" the preemption timer by setting it to its max
		 * value.  Don't use the timer if it might cause spurious exits
		 * at a rate faster than 0.1 Hz (of uninterrupted guest time).
		 */
		if (use_timer_freq > 0xffffffffu / 10)
			enable_preemption_timer = false;
	}
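	/*
	 * Numeric example of the check above, for a hypothetical host: with
	 * a 2.4 GHz TSC and a rate multiplier of 5,
	 *
	 *	use_timer_freq = 2,400,000,000 >> 5 = 75,000,000 Hz
	 *
	 * while the limit is 0xffffffff / 10 ~= 429.5 MHz.  In other words,
	 * the all-ones "disabled" timer value must correspond to at least
	 * 10 seconds of guest time, so it never fires faster than 0.1 Hz.
	 */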
	if (!enable_preemption_timer) {
		vmx_x86_ops.set_hv_timer = NULL;
		vmx_x86_ops.cancel_hv_timer = NULL;
		vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
	}

	kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);

	kvm_mce_cap_supported |= MCG_LMCE_P;

	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
		return -EINVAL;
	if (!enable_ept || !cpu_has_vmx_intel_pt())
		pt_mode = PT_MODE_SYSTEM;

	setup_default_sgx_lepubkeyhash();

	if (nested) {
		nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
					   vmx_capability.ept);

		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
		if (r)
			return r;
	}

	vmx_set_cpu_caps();

	r = alloc_kvm_area();
	if (r)
		nested_vmx_hardware_unsetup();

	return r;
}
static struct kvm_x86_init_ops vmx_init_ops __initdata = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_setup = hardware_setup,

	.runtime_ops = &vmx_x86_ops,
};

static void vmx_cleanup_l1d_flush(void)
{
	if (vmx_l1d_flush_pages) {
		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
		vmx_l1d_flush_pages = NULL;
	}
	/* Restore state so sysfs ignores VMX */
	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}
static void vmx_exit(void)
{
#ifdef CONFIG_KEXEC_CORE
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
#endif

	kvm_exit();

#if IS_ENABLED(CONFIG_HYPERV)
	if (static_branch_unlikely(&enable_evmcs)) {
		int cpu;
		struct hv_vp_assist_page *vp_ap;
		/*
		 * Reset everything to support using non-enlightened VMCS
		 * access later (e.g. when we reload the module with
		 * enlightened_vmcs=0)
		 */
		for_each_online_cpu(cpu) {
			vp_ap =	hv_get_vp_assist_page(cpu);

			if (!vp_ap)
				continue;

			vp_ap->nested_control.features.directhypercall = 0;
			vp_ap->current_nested_vmcs = 0;
			vp_ap->enlighten_vmentry = 0;
		}

		static_branch_disable(&enable_evmcs);
	}
#endif
	vmx_cleanup_l1d_flush();

	allow_smaller_maxphyaddr = false;
}
module_exit(vmx_exit);
static int __init vmx_init(void)
{
	int r, cpu;

#if IS_ENABLED(CONFIG_HYPERV)
	/*
	 * Enlightened VMCS usage should be recommended and the host needs
	 * to support eVMCS v1 or above. We can also disable eVMCS support
	 * with module parameter.
	 */
	if (enlightened_vmcs &&
	    ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
	    (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
	    KVM_EVMCS_VERSION) {

		/* Check that we have assist pages on all online CPUs */
		for_each_online_cpu(cpu) {
			if (!hv_get_vp_assist_page(cpu)) {
				enlightened_vmcs = false;
				break;
			}
		}

		if (enlightened_vmcs) {
			pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
			static_branch_enable(&enable_evmcs);
		}

		if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
			vmx_x86_ops.enable_direct_tlbflush
				= hv_enable_direct_tlbflush;

	} else {
		enlightened_vmcs = false;
	}
#endif

	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
		     __alignof__(struct vcpu_vmx), THIS_MODULE);
	if (r)
		return r;

	/*
	 * Must be called after kvm_init() so enable_ept is properly set
	 * up. Hand the parameter mitigation value in which was stored in
	 * the pre module init parser. If no parameter was given, it will
	 * contain 'auto' which will be turned into the default 'cond'
	 * mitigation mode.
	 */
	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
	if (r) {
		vmx_exit();
		return r;
	}

	for_each_possible_cpu(cpu) {
		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));

		pi_init_cpu(cpu);
	}

#ifdef CONFIG_KEXEC_CORE
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   crash_vmclear_local_loaded_vmcss);
#endif
	vmx_check_vmcs12_offsets();

	/*
	 * Shadow paging doesn't have a (further) performance penalty
	 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
	 * by default
	 */
	if (!enable_ept)
		allow_smaller_maxphyaddr = true;

	return 0;
}
module_init(vmx_init);
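/*
 * Example usage: the module parameters declared at the top of this file can
 * be set at load time (the values shown are the defaults, not a
 * recommendation), e.g.:
 *
 *	modprobe kvm_intel ept=1 vpid=1 nested=1 vmentry_l1d_flush=cond
 *
 * or inspected at runtime under /sys/module/kvm_intel/parameters/.
 */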