// SPDX-License-Identifier: GPL-2.0

#include <linux/objtool.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);
#define CC(consistency_check)						\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})
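/*
 * Illustrative usage sketch (not part of the original file): because CC()
 * is a GCC statement expression that yields the check's result, a caller
 * can trace and test in one step, e.g.:
 *
 *	if (CC(!page_address_valid(vcpu, gpa)))
 *		return -EINVAL;
 *
 * On failure this logs the literal text "!page_address_valid(vcpu, gpa)"
 * through the tracepoint before the caller returns the error.
 */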
/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])
struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);
static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;
	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * in vmcs12.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}
/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;

	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed.
	 */
	return kvm_skip_emulated_instruction(vcpu);
}
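/*
 * Illustrative sketch (not from the original file): per the SDM
 * "Conventions" section, the three outcomes map onto RFLAGS as follows -
 * VMsucceed clears CF/PF/AF/ZF/SF/OF, VMfailInvalid sets only CF, and
 * VMfailValid sets only ZF (with the error number stored in the current
 * VMCS, as above).
 */
static inline unsigned long __maybe_unused
demo_vmx_result_rflags(unsigned long rflags, bool failed,
		       bool have_current_vmcs)
{
	rflags &= ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF);
	if (failed)
		rflags |= have_current_vmcs ? X86_EFLAGS_ZF : X86_EFLAGS_CF;
	return rflags;
}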
static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	return nested_vmx_failValid(vcpu, vm_instruction_error);
}
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: do not simply reset the guest here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}
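/*
 * Illustrative sketch (not from the original file): a VMX capability MSR
 * for a control field packs the allowed 0-settings (i.e. the bits that
 * must be 1) in its low 32 bits and the allowed 1-settings in its high
 * 32 bits, which is exactly what vmx_control_msr() reassembles.  A
 * proposed control value is valid iff it keeps every must-be-1 bit set
 * and sets nothing outside the allowed 1-settings:
 */
static inline bool __maybe_unused demo_vmx_control_ok(u64 ctl_msr, u32 want)
{
	u32 must_be_one = (u32)ctl_msr;		/* low word  */
	u32 may_be_one  = (u32)(ctl_msr >> 32);	/* high word */

	return !(must_be_one & ~want) && !(want & ~may_be_one);
}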
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
	vmx->nested.hv_evmcs_vmptr = 0;
	vmx->nested.hv_evmcs = NULL;
}
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu, prev);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vmx_register_cache_reset(vcpu);
}
/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
		vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}
/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vcpu_put(vcpu);
}
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vm_exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		vm_exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		vm_exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
				to_vmx(vcpu)->nested.msrs.ept_caps &
				VMX_EPT_EXECUTE_ONLY_BIT,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_eptp(vcpu));
	vcpu->arch.mmu->get_guest_pgd     = nested_ept_get_eptp;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}
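/*
 * Illustrative sketch (not from the original file): with PF_VECTOR set in
 * the exception bitmap, a #PF reflects to L1 only when
 * (error_code & mask) == match; clearing the bitmap bit inverts the
 * selection, which is what the XOR above expresses.  E.g. with
 * mask = match = PFERR_USER_MASK and the bitmap bit set, only user-mode
 * faults cause a VM-exit to L1:
 */
static inline bool __maybe_unused
demo_pf_exits_to_l1(u32 exception_bitmap, u32 mask, u32 match, u16 error_code)
{
	bool bit = exception_bitmap & (1u << PF_VECTOR);
	bool inequality = (error_code & mask) != match;

	return inequality ^ bit;  /* mirrors nested_vmx_is_page_fault_vmexit() */
}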
/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
				payload ^= DR6_RTM;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}
static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
		return -EINVAL;

	return 0;
}
/*
 * Check if an MSR write is intercepted by the L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}
/*
 * If an MSR is allowed by L0, check whether it is also allowed by L1.
 * The corresponding bit will be cleared unless both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		    !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		    !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		    !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		    !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
	}
}
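/*
 * Illustrative sketch (not from the original file): the 4K MSR bitmap is
 * four 1K quadrants; the quadrant selects the access type and MSR range,
 * and the (13-bit) MSR index selects a bit within it:
 *
 *	0x000: reads  of MSRs 0x00000000 - 0x00001fff  ("read-low")
 *	0x400: reads  of MSRs 0xc0000000 - 0xc0001fff  ("read-high")
 *	0x800: writes of MSRs 0x00000000 - 0x00001fff  ("write-low")
 *	0xc00: writes of MSRs 0xc0000000 - 0xc0001fff  ("write-high")
 */
static inline unsigned int __maybe_unused
demo_msr_bitmap_quadrant(u32 msr, bool write)
{
	unsigned int quadrant = (msr >= 0xc0000000) ? 0x400 : 0x000;

	return quadrant + (write ? 0x800 : 0);	/* byte offset in the bitmap */
}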
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}
/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);

	/*
	 * Checking the L0->L1 bitmap is trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessary merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_SPEC_CTRL,
					MSR_TYPE_R | MSR_TYPE_W);

	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_PRED_CMD,
					MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

	return true;
}
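/*
 * Illustrative sketch (not from the original file): after the merge above,
 * L2 gets pass-through access to an MSR only when both policies agree; a
 * set (intercept) bit in either source bitmap keeps the intercept in the
 * merged vmcs02 bitmap:
 */
static inline bool __maybe_unused
demo_l02_msr_intercepted(bool l0_intercepts, bool l1_intercepts)
{
	return l0_intercepts || l1_intercepts;
}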
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct kvm_host_map map;
	struct vmcs12 *shadow;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return;

	memcpy(shadow, map.hva, VMCS12_SIZE);
	kvm_vcpu_unmap(vcpu, &map, false);
}
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}
/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT.
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
		return -EINVAL;
	else
		return 0;
}
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has already been checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	   (CC(!nested_cpu_has_vid(vmcs12)) ||
	    CC(!nested_exit_intr_ack_set(vcpu)) ||
	    CC((vmcs12->posted_intr_nv & 0xff00)) ||
	    CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
	    CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;

	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}
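/*
 * Illustrative sketch (not from the original file): each vmx_msr_entry is
 * 16 bytes, so a list of @count entries spans [addr, addr + count * 16 - 1];
 * the check above rejects a list that is misaligned or whose first or last
 * byte carries bits at or above MAXPHYADDR (e.g. >= 1ULL << 36 when
 * MAXPHYADDR is 36):
 */
static inline bool __maybe_unused
demo_msr_list_in_bounds(u64 addr, u32 count, int maxphyaddr)
{
	u64 last = addr + (u64)count * sizeof(struct vmx_msr_entry) - 1;

	return IS_ALIGNED(addr, 16) &&
	       !(addr >> maxphyaddr) && !(last >> maxphyaddr);
}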
static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_load_count,
					   vmcs12->vm_exit_msr_load_addr)) ||
	    CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_store_count,
					   vmcs12->vm_exit_msr_store_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_entry_msr_load_count,
					   vmcs12->vm_entry_msr_load_addr)))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (CC(!nested_cpu_has_ept(vmcs12)) ||
	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							 struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
		return -EINVAL;

	return 0;
}
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
		return -EINVAL;
	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
	    CC(e->index == MSR_IA32_UCODE_REV))
		return -EINVAL;
	if (CC(e->reserved != 0))
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_FS_BASE) ||
	    CC(e->index == MSR_GS_BASE) ||
	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}
static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				       vmx->nested.msrs.misc_high);

	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}
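/*
 * Illustrative sketch (not from the original file): bits 27:25 of
 * IA32_VMX_MISC encode N, and the maximum recommended list size is
 * (N + 1) * 512 entries (VMX_MISC_MSR_LIST_MULTIPLIER), so N = 0 yields
 * the architectural minimum of 512:
 */
static inline u32 __maybe_unused demo_max_msr_list_size(u64 vmx_misc)
{
	return ((u32)((vmx_misc >> 25) & 0x7) + 1) * 512;
}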
/*
 * Load guest's/host's MSRs at nested entry/exit.
 * Returns 0 for success, or the failing entry's index on failure.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To maintain compatibility with hardware as
 * much as possible, process all valid entries before failing rather than
 * prechecking for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			goto fail;

		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		if (kvm_set_msr(vcpu, e.index, e.value)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	/* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
	return i + 1;
}
static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
					    u32 msr_index,
					    u64 *data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If the L0 hypervisor stored a more accurate value for the TSC that
	 * does not include the time taken for emulation of the L2->L1
	 * VM-exit in L0, use the more accurate value.
	 */
	if (msr_index == MSR_IA32_TSC) {
		int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
						    MSR_IA32_TSC);

		if (i >= 0) {
			u64 val = vmx->msr_autostore.guest.val[i].value;

			*data = kvm_read_l1_tsc(vcpu, val);
			return true;
		}
	}

	if (kvm_get_msr(vcpu, msr_index, data)) {
		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
				     msr_index);
		return false;
	}
	return true;
}
static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
				     struct vmx_msr_entry *e)
{
	if (kvm_vcpu_read_guest(vcpu,
				gpa + i * sizeof(*e),
				e, 2 * sizeof(u32))) {
		pr_debug_ratelimited(
			"%s cannot read MSR entry (%u, 0x%08llx)\n",
			__func__, i, gpa + i * sizeof(*e));
		return false;
	}
	if (nested_vmx_store_msr_check(vcpu, e)) {
		pr_debug_ratelimited(
			"%s check failed (%u, 0x%x, 0x%x)\n",
			__func__, i, e->index, e->reserved);
		return false;
	}
	return true;
}
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u64 data;
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			return -EINVAL;

		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return -EINVAL;

		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
			return -EINVAL;

		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
					     offsetof(struct vmx_msr_entry, value),
					 &data, sizeof(data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, data);
			return -EINVAL;
		}
	}
	return 0;
}
static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 count = vmcs12->vm_exit_msr_store_count;
	u64 gpa = vmcs12->vm_exit_msr_store_addr;
	struct vmx_msr_entry e;
	u32 i;

	for (i = 0; i < count; i++) {
		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return false;

		if (e.index == msr_index)
			return true;
	}
	return false;
}
static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
					   u32 msr_index)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
	bool in_vmcs12_store_list;
	int msr_autostore_slot;
	bool in_autostore_list;
	int last;

	msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
	in_autostore_list = msr_autostore_slot >= 0;
	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

	if (in_vmcs12_store_list && !in_autostore_list) {
		if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
			/*
			 * Emulated VMEntry does not fail here. Instead a less
			 * accurate value will be returned by
			 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
			 * instead of reading the value from the vmcs02 VMExit
			 * MSR-store area.
			 */
			pr_warn_ratelimited(
				"Not enough msr entries in msr_autostore. Can't add msr %x\n",
				msr_index);
			return;
		}
		last = autostore->nr++;
		autostore->val[last].index = msr_index;
	} else if (!in_vmcs12_store_list && in_autostore_list) {
		last = --autostore->nr;
		autostore->val[msr_autostore_slot] = autostore->val[last];
	}
}
static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long invalid_mask;

	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	return (val & invalid_mask) == 0;
}
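/*
 * Illustrative sketch (not from the original file): with MAXPHYADDR = 36,
 * invalid_mask is ~0ULL << 36 = 0xfffffff000000000; a CR3 value such as
 * 0x1000000000 (bit 36 set) fails the check, while 0xfff000000 passes.
 */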
/*
 * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit.
 * tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't
 * enable VPID for L2 (implying it expects a TLB flush on VMX transitions).
 *
 * If EPT is enabled by L0 a sync is never needed:
 * - if it is disabled by L1, then L0 is not shadowing L1 or L2 PTEs, there
 *   cannot be unsync'd SPTEs for either L1 or L2.
 *
 * - if it is also enabled by L1, then L0 doesn't need to sync on VM-Enter,
 *   as VM-Enter isn't required to invalidate guest-physical mappings
 *   (irrespective of VPID), i.e. L1 can't rely on the (virtual) CPU to flush
 *   stale guest-physical mappings for L2 from the TLB.  And as above, L0 isn't
 *   shadowing L1 PTEs so there are no unsync'd SPTEs to sync on VM-Exit.
 *
 * If EPT is disabled by L0:
 * - if VPID is enabled by L1 (for L2), the situation is similar to when L1
 *   enables EPT: L0 doesn't need to sync as VM-Enter and VM-Exit aren't
 *   required to invalidate linear mappings (EPT is disabled so there are
 *   no combined or guest-physical mappings), i.e. L1 can't rely on the
 *   (virtual) CPU to flush stale linear mappings for either L2 or itself (L1).
 *
 * - however if VPID is disabled by L1, then a sync is needed as L1 expects all
 *   linear mappings (EPT is disabled so there are no combined or guest-physical
 *   mappings) to be invalidated on both VM-Enter and VM-Exit.
 *
 * Note, this logic is subtly different than nested_has_guest_tlb_tag(), which
 * additionally checks that L2 has been assigned a VPID (when EPT is disabled).
 * Whether or not L2 has been assigned a VPID by L0 is irrelevant with respect
 * to L1's expectations, e.g. L0 needs to invalidate hardware TLB entries if L2
 * doesn't have a unique VPID to prevent reusing L1's entries (assuming L1 has
 * been assigned a VPID), but L0 doesn't need to do a MMU sync because L1
 * doesn't expect stale (virtual) TLB entries to be flushed, i.e. L1 doesn't
 * know that L0 will flush the TLB and so L1 will do INVVPID as needed to flush
 * stale TLB entries, at which point L0 will sync L2's MMU.
 */
static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu)
{
	return !enable_ept && !nested_cpu_has_vpid(get_vmcs12(vcpu));
}
/*
 * Load guest's/host's cr3 at nested entry/exit.  @nested_ept is true if we are
 * emulating VM-Entry into a guest with EPT enabled.  On failure, the expected
 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
 * @entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
			       enum vm_entry_failure_code *entry_failure_code)
{
	if (CC(!nested_cr3_valid(vcpu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/*
	 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
	 * must not be dereferenced.
	 */
	if (!nested_ept && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
			*entry_failure_code = ENTRY_FAIL_PDPTE;
			return -EINVAL;
		}
	}

	/*
	 * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
	 * flushes are handled by nested_vmx_transition_tlb_flush().  See
	 * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
	 */
	if (!nested_ept)
		kvm_mmu_new_pgd(vcpu, cr3, true,
				!nested_vmx_transition_mmu_sync(vcpu));

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}
/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
 *
 * If L0 uses EPT, L1 and L2 run with different EPTP because
 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
 * are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return enable_ept ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}
static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
					    struct vmcs12 *vmcs12,
					    bool is_vmenter)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If VPID is disabled, linear and combined mappings are flushed on
	 * VM-Enter/VM-Exit, and guest-physical mappings are valid only for
	 * their associated EPTP.
	 */
	if (!enable_vpid)
		return;

	/*
	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
	 * for *all* contexts to be flushed on VM-Enter/VM-Exit.
	 *
	 * If VPID is enabled and used by vmcs12, but L2 does not have a unique
	 * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
	 * a VPID for L2, flush the current context as the effective ASID is
	 * common to both L1 and L2.
	 *
	 * Defer the flush so that it runs after vmcs02.EPTP has been set by
	 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
	 * redundant flushes further down the nested pipeline.
	 *
	 * If a TLB flush isn't required due to any of the above, and vpid12 is
	 * changing then the new "virtual" VPID (vpid12) will reuse the same
	 * "real" VPID (vpid02), and so needs to be sync'd.  There is no direct
	 * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
	 * all nested vCPUs.
	 */
	if (!nested_cpu_has_vpid(vmcs12)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	} else if (!nested_has_guest_tlb_tag(vcpu)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	} else if (is_vmenter &&
		   vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
		vmx->nested.last_vpid = vmcs12->virtual_processor_id;
		vpid_sync_context(nested_get_vpid02(vcpu));
	}
}
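/*
 * Illustrative summary (not from the original file) of the decision chain
 * above:
 *
 *	vmcs12 VPID   unique L2 tag   vpid12 changed    action
 *	   no              -                -           flush all contexts
 *	   yes             no               -           flush current context
 *	   yes             yes        yes (VM-Enter)    sync vpid02
 *	   yes             yes              no          nothing
 */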
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}
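/*
 * Illustrative sketch (not from the original file): under mask ~0ULL,
 * is_bitwise_subset(0b1010, 0b1000, ~0ULL) is true because
 * (0b1010 | 0b1000) == 0b1010, while is_bitwise_subset(0b1010, 0b0100,
 * ~0ULL) is false because the OR gains bit 2.
 */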
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}
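/*
 * Illustrative sketch (not from the original file): userspace may only
 * tighten a control MSR, never loosen it.  Reusing is_bitwise_subset(),
 * the two checks above combine as:
 */
static inline bool __maybe_unused demo_ctl_restore_ok(u64 supported, u64 data)
{
	/* must-be-1 bits (low word) must all remain set in data ... */
	return is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)) &&
	       /* ... and allowed-1 bits (high word) may only be cleared. */
	       is_bitwise_subset(supported, data, GENMASK_ULL(63, 32));
}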
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	return 0;
}
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}
static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits that "must be 1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}
/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	case MSR_IA32_VMX_VMFUNC:
		if (data & ~vmx->nested.msrs.vmfunc_controls)
			return -EINVAL;
		vmx->nested.msrs.vmfunc_controls = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}
/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}
/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
 * been modified by the L1 guest.  Note, "writable" in this context means
 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
 * VM-exit information fields (which are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS").
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i;

	if (WARN_ON(!shadow_vmcs))
		return;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (i = 0; i < max_shadow_read_write_fields; i++) {
		field = shadow_read_write_fields[i];
		val = __vmcs_readl(field.encoding);
		vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const struct shadow_vmcs_field *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i, q;

	if (WARN_ON(!shadow_vmcs))
		return;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			val = vmcs12_read_any(vmcs12, field.encoding,
					      field.offset);
			__vmcs_writel(field.encoding, val);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}
static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
		vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
		vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
		vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
		vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
		vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
		vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
		vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
		vmcs12->guest_es_selector = evmcs->guest_es_selector;
		vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
		vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
		vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
		vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
		vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
		vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
		vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
		vmcs12->tsc_offset = evmcs->tsc_offset;
		vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
		vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
		vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
		vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
		vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
		vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
		vmcs12->guest_cr0 = evmcs->guest_cr0;
		vmcs12->guest_cr3 = evmcs->guest_cr3;
		vmcs12->guest_cr4 = evmcs->guest_cr4;
		vmcs12->guest_dr7 = evmcs->guest_dr7;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
		vmcs12->host_fs_base = evmcs->host_fs_base;
		vmcs12->host_gs_base = evmcs->host_gs_base;
		vmcs12->host_tr_base = evmcs->host_tr_base;
		vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
		vmcs12->host_idtr_base = evmcs->host_idtr_base;
		vmcs12->host_rsp = evmcs->host_rsp;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
		vmcs12->ept_pointer = evmcs->ept_pointer;
		vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
		vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
		vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
		vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
		vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
		vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
		vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
		vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
		vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
		vmcs12->guest_pending_dbg_exceptions =
			evmcs->guest_pending_dbg_exceptions;
		vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
		vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
		vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
		vmcs12->guest_activity_state = evmcs->guest_activity_state;
		vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
	}

	/*
	 * Not used in KVM:
	 *
	 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
	 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
	 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
	 * vmcs12->page_fault_error_code_mask =
	 *		evmcs->page_fault_error_code_mask;
	 * vmcs12->page_fault_error_code_match =
	 *		evmcs->page_fault_error_code_match;
	 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
	 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
	 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
	 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
	 *
	 * Read only fields:
	 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
	 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
	 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
	 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
	 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
	 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
	 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
	 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
	 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
	 * vmcs12->exit_qualification = evmcs->exit_qualification;
	 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
	 *
	 * Not present in struct vmcs12:
	 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
	 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
	 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
	 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
	 */

	return 0;
}
static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/*
	 * Should not be changed by KVM:
	 *
	 * evmcs->host_es_selector = vmcs12->host_es_selector;
	 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
	 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
	 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
	 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
	 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
	 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
	 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
	 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
	 * evmcs->host_cr0 = vmcs12->host_cr0;
	 * evmcs->host_cr3 = vmcs12->host_cr3;
	 * evmcs->host_cr4 = vmcs12->host_cr4;
	 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
	 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
	 * evmcs->host_rip = vmcs12->host_rip;
	 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
	 * evmcs->host_fs_base = vmcs12->host_fs_base;
	 * evmcs->host_gs_base = vmcs12->host_gs_base;
	 * evmcs->host_tr_base = vmcs12->host_tr_base;
	 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
	 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
	 * evmcs->host_rsp = vmcs12->host_rsp;
	 * sync_vmcs02_to_vmcs12() doesn't read these:
	 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
	 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
	 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
	 * evmcs->ept_pointer = vmcs12->ept_pointer;
	 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
	 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
	 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
	 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
	 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
	 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
	 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
	 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
	 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
	 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
	 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
	 * evmcs->page_fault_error_code_mask =
	 *		vmcs12->page_fault_error_code_mask;
	 * evmcs->page_fault_error_code_match =
	 *		vmcs12->page_fault_error_code_match;
	 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
	 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
	 * evmcs->tsc_offset = vmcs12->tsc_offset;
	 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
	 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
	 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
	 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
	 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
	 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
	 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
	 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
	 *
	 * Not present in struct vmcs12:
	 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
	 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
	 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
	 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
	 */
	evmcs->guest_es_selector = vmcs12->guest_es_selector;
	evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
	evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
	evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
	evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
	evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
	evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
	evmcs->guest_tr_selector = vmcs12->guest_tr_selector;

	evmcs->guest_es_limit = vmcs12->guest_es_limit;
	evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
	evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
	evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
	evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
	evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
	evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
	evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
	evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
	evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;

	evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
	evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
	evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
	evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
	evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
	evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
	evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
	evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;

	evmcs->guest_es_base = vmcs12->guest_es_base;
	evmcs->guest_cs_base = vmcs12->guest_cs_base;
	evmcs->guest_ss_base = vmcs12->guest_ss_base;
	evmcs->guest_ds_base = vmcs12->guest_ds_base;
	evmcs->guest_fs_base = vmcs12->guest_fs_base;
	evmcs->guest_gs_base = vmcs12->guest_gs_base;
	evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
	evmcs->guest_tr_base = vmcs12->guest_tr_base;
	evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
	evmcs->guest_idtr_base = vmcs12->guest_idtr_base;

	evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
	evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;

	evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
	evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
	evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
	evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;

	evmcs->guest_pending_dbg_exceptions =
		vmcs12->guest_pending_dbg_exceptions;
	evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
	evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;

	evmcs->guest_activity_state = vmcs12->guest_activity_state;
	evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;

	evmcs->guest_cr0 = vmcs12->guest_cr0;
	evmcs->guest_cr3 = vmcs12->guest_cr3;
	evmcs->guest_cr4 = vmcs12->guest_cr4;
	evmcs->guest_dr7 = vmcs12->guest_dr7;

	evmcs->guest_physical_address = vmcs12->guest_physical_address;

	evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
	evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
	evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
	evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
	evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
	evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
	evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
	evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;

	evmcs->exit_qualification = vmcs12->exit_qualification;

	evmcs->guest_linear_address = vmcs12->guest_linear_address;
	evmcs->guest_rsp = vmcs12->guest_rsp;
	evmcs->guest_rflags = vmcs12->guest_rflags;

	evmcs->guest_interruptibility_info =
		vmcs12->guest_interruptibility_info;
	evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
	evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
	evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
	evmcs->vm_entry_exception_error_code =
		vmcs12->vm_entry_exception_error_code;
	evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;

	evmcs->guest_rip = vmcs12->guest_rip;

	evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
}

/*
 * This is an equivalent of the nested hypervisor executing the vmptrld
 * instruction.
 */
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
	struct kvm_vcpu *vcpu, bool from_launch)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool evmcs_gpa_changed = false;
	u64 evmcs_gpa;

	if (likely(!vmx->nested.enlightened_vmcs_enabled))
		return EVMPTRLD_DISABLED;

	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
		return EVMPTRLD_DISABLED;

	if (unlikely(!vmx->nested.hv_evmcs ||
		     evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
		if (!vmx->nested.hv_evmcs)
			vmx->nested.current_vmptr = -1ull;

		nested_release_evmcs(vcpu);

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
				 &vmx->nested.hv_evmcs_map))
			return EVMPTRLD_ERROR;

		vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;

		/*
		 * Currently, KVM only supports eVMCS version 1
		 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this
		 * value to the first u32 field of the eVMCS, which should
		 * specify the eVMCS version number.
		 *
		 * Guest should be aware of supported eVMCS versions by host by
		 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
		 * expected to set this CPUID leaf according to the value
		 * returned in vmcs_version from nested_enable_evmcs().
		 *
		 * However, it turns out that Microsoft Hyper-V fails to comply
		 * with its own invented interface: when Hyper-V uses eVMCS, it
		 * just sets the first u32 field of the eVMCS to the revision_id
		 * specified in MSR_IA32_VMX_BASIC, instead of an eVMCS version
		 * number, which is one of the supported versions specified in
		 * CPUID.0x4000000A.EAX[0:15].
		 *
		 * To overcome this Hyper-V bug, we accept here either a
		 * supported eVMCS version or the VMCS12 revision_id as valid
		 * values for the first u32 field of the eVMCS.
		 */
		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
			nested_release_evmcs(vcpu);
			return EVMPTRLD_VMFAIL;
		}

		vmx->nested.dirty_vmcs12 = true;
		vmx->nested.hv_evmcs_vmptr = evmcs_gpa;

		evmcs_gpa_changed = true;
		/*
		 * Unlike normal vmcs12, enlightened vmcs12 is not fully
		 * reloaded from guest's memory (read only fields, fields not
		 * present in struct hv_enlightened_vmcs, ...). Make sure there
		 * are no leftovers.
		 */
		if (from_launch) {
			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

			memset(vmcs12, 0, sizeof(*vmcs12));
			vmcs12->hdr.revision_id = VMCS12_REVISION;
		}
	}

	/*
	 * Clean fields data can't be used on VMLAUNCH and when we switch
	 * between different L2 guests as KVM keeps a single VMCS12 per L1.
	 */
	if (from_launch || evmcs_gpa_changed)
		vmx->nested.hv_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;

	return EVMPTRLD_SUCCEEDED;
}

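/*
 * Illustrative example, not part of the original file: a spec-compliant L1
 * stores KVM_EVMCS_VERSION (1) in the first u32 of the eVMCS page, while
 * Hyper-V stores VMCS12_REVISION there instead; per the workaround above,
 * either value yields a successful enlightened VMPTRLD.
 */
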
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->nested.hv_evmcs) {
		copy_vmcs12_to_enlightened(vmx);
		/* All fields are clean */
		vmx->nested.hv_evmcs->hv_clean_fields |=
			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
	} else {
		copy_vmcs12_to_shadow(vmx);
	}

	vmx->nested.need_vmcs12_to_shadow_sync = false;
}

static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}

static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
			VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;

	if (!vmx->nested.has_preemption_timer_deadline) {
		vmx->nested.preemption_timer_deadline =
			vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
		vmx->nested.has_preemption_timer_deadline = true;
	}
	return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
}

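/*
 * Worked example with hypothetical values: given the 2^5 TSC-to-preemption-
 * timer ratio emulated above, an L1 whose scaled TSC reads 0x1000 while
 * vmcs12 programs vmx_preemption_timer_value = 0x200 gets
 *
 *	deadline = 0x200 + 0x1000 = 0x1200
 *
 * on first use; a later call that observes a scaled TSC of 0x1100 returns
 * 0x1200 - 0x1100 = 0x100 timer ticks still outstanding.
 */
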
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
				       u64 preemption_timeout)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * A timer value of zero is architecturally guaranteed to cause
	 * a VMExit prior to executing any instructions in the guest.
	 */
	if (preemption_timeout == 0) {
		vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
		return;
	}

	if (vcpu->arch.virtual_tsc_khz == 0)
		return;

	preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
	preemption_timeout *= 1000000;
	do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
	hrtimer_start(&vmx->nested.preemption_timer,
		      ktime_add_ns(ktime_get(), preemption_timeout),
		      HRTIMER_MODE_ABS_PINNED);
}

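/*
 * Worked example with hypothetical values: a timer value of 1000 on a vCPU
 * with virtual_tsc_khz = 2000000 (a 2 GHz guest TSC) expands to
 * 1000 << 5 = 32000 TSC cycles, i.e.
 *
 *	32000 * 1000000 / 2000000 = 16000 ns
 *
 * so the hrtimer above fires 16 usec of guest TSC time after VM-Entry.
 */
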
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		return vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
	else
		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
}

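/*
 * Illustrative example: with vcpu EFER = EFER_SCE | EFER_LMA | EFER_LME and
 * an entry to a legacy-mode L2 (VM_ENTRY_IA32E_MODE clear, EFER not loaded
 * from vmcs12), the result above is EFER_SCE alone: LMA and LME are
 * stripped so vmcs02 describes a 32-bit guest.
 */
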
static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
{
	/*
	 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
	 * according to L0's settings (vmcs12 is irrelevant here).  Host
	 * fields that come from L0 and are not constant, e.g. HOST_CR3,
	 * will be set as needed prior to VMLAUNCH/VMRESUME.
	 */
	if (vmx->nested.vmcs02_initialized)
		return;
	vmx->nested.vmcs02_initialized = true;

	/*
	 * We don't care what the EPTP value is, we just need to guarantee
	 * it's valid so we don't get a false positive when doing early
	 * consistency checks.
	 */
	if (enable_ept && nested_early_check)
		vmcs_write64(EPT_POINTER,
			     construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));

	/* All VMFUNCs are currently emulated through L0 vmexits.  */
	if (cpu_has_vmx_vmfunc())
		vmcs_write64(VM_FUNCTION_CONTROL, 0);

	if (cpu_has_vmx_posted_intr())
		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));

	/*
	 * The PML address never changes, so it is constant in vmcs02.
	 * Conceptually we want to copy the PML index from vmcs01 here,
	 * and then back to vmcs01 on nested vmexit.  But since we flush
	 * the log and reset GUEST_PML_INDEX on each vmexit, the PML
	 * index is also effectively constant in vmcs02.
	 */
	if (enable_pml) {
		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
	}

	if (cpu_has_vmx_encls_vmexit())
		vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);

	/*
	 * Set the MSR load/store lists to match L0's settings.  Only the
	 * addresses are constant (for vmcs02), the counts can change based
	 * on L2's behavior, e.g. switching to/from long mode.
	 */
	vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));

	vmx_set_constant_host_state(vmx);
}

static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
				      struct vmcs12 *vmcs12)
{
	prepare_vmcs02_constant_state(vmx);

	vmcs_write64(VMCS_LINK_POINTER, -1ull);

	if (enable_vpid) {
		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
		else
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
	}
}

static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	u32 exec_control, vmcs12_exec_ctrl;
	u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);

	if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
		prepare_vmcs02_early_rare(vmx, vmcs12);

	/*
	 * PIN CONTROLS
	 */
	exec_control = vmx_pin_based_exec_ctrl(vmx);
	exec_control |= (vmcs12->pin_based_vm_exec_control &
			 ~PIN_BASED_VMX_PREEMPTION_TIMER);

	/* Posted interrupts setting is only taken from vmcs12.  */
	if (nested_cpu_has_posted_intr(vmcs12)) {
		vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
		vmx->nested.pi_pending = false;
	} else {
		exec_control &= ~PIN_BASED_POSTED_INTR;
	}
	pin_controls_set(vmx, exec_control);

	/*
	 * EXEC CONTROLS
	 */
	exec_control = vmx_exec_control(vmx); /* L0's desires */
	exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
	exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
	exec_control &= ~CPU_BASED_TPR_SHADOW;
	exec_control |= vmcs12->cpu_based_vm_exec_control;

	vmx->nested.l1_tpr_threshold = -1;
	if (exec_control & CPU_BASED_TPR_SHADOW)
		vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
#ifdef CONFIG_X86_64
	else
		exec_control |= CPU_BASED_CR8_LOAD_EXITING |
				CPU_BASED_CR8_STORE_EXITING;
#endif

	/*
	 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
	 * for I/O port accesses.
	 */
	exec_control |= CPU_BASED_UNCOND_IO_EXITING;
	exec_control &= ~CPU_BASED_USE_IO_BITMAPS;

	/*
	 * This bit will be computed in nested_get_vmcs12_pages, because
	 * we do not have access to L1's MSR bitmap yet.  For now, keep
	 * the same bit as before, hoping to avoid multiple VMWRITEs that
	 * only set/clear this bit.
	 */
	exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
	exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;

	exec_controls_set(vmx, exec_control);

	/*
	 * SECONDARY EXEC CONTROLS
	 */
	if (cpu_has_secondary_exec_ctrls()) {
		exec_control = vmx->secondary_exec_control;

		/* Take the following fields only from vmcs12 */
		exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
				  SECONDARY_EXEC_ENABLE_INVPCID |
				  SECONDARY_EXEC_ENABLE_RDTSCP |
				  SECONDARY_EXEC_XSAVES |
				  SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
				  SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_ENABLE_VMFUNC);
		if (nested_cpu_has(vmcs12,
				   CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
			vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
				~SECONDARY_EXEC_ENABLE_PML;
			exec_control |= vmcs12_exec_ctrl;
		}

		/* VMCS shadowing for L2 is emulated for now */
		exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;

		/*
		 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
		 * will not have to rewrite the controls just for this bit.
		 */
		if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
		    (vmcs12->guest_cr4 & X86_CR4_UMIP))
			exec_control |= SECONDARY_EXEC_DESC;

		if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
			vmcs_write16(GUEST_INTR_STATUS,
				vmcs12->guest_intr_status);

		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
			exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;

		secondary_exec_controls_set(vmx, exec_control);
	}

	/*
	 * ENTRY CONTROLS
	 *
	 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
	 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
	 * on the related bits (if supported by the CPU) in the hope that
	 * we can avoid VMWrites during vmx_set_efer().
	 */
	exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
			~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
	if (cpu_has_load_ia32_efer()) {
		if (guest_efer & EFER_LMA)
			exec_control |= VM_ENTRY_IA32E_MODE;
		if (guest_efer != host_efer)
			exec_control |= VM_ENTRY_LOAD_IA32_EFER;
	}
	vm_entry_controls_set(vmx, exec_control);

	/*
	 * EXIT CONTROLS
	 *
	 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
	 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
	 */
	exec_control = vmx_vmexit_ctrl();
	if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
		exec_control |= VM_EXIT_LOAD_IA32_EFER;
	vm_exit_controls_set(vmx, exec_control);

	/*
	 * Interrupt/Exception Fields
	 */
	if (vmx->nested.nested_run_pending) {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     vmcs12->vm_entry_intr_info_field);
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
			     vmcs12->vm_entry_exception_error_code);
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmcs12->vm_entry_instruction_len);
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     vmcs12->guest_interruptibility_info);
		vmx->loaded_vmcs->nmi_known_unmasked =
			!(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
	} else {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
	}
}

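/*
 * Illustrative example with hypothetical controls: if L0's
 * vmx_exec_control() asks for CPU_BASED_HLT_EXITING and vmcs12 additionally
 * sets CPU_BASED_MONITOR_EXITING, the merged exec controls contain both
 * bits - L2 exits on HLT for L0's benefit and on MONITOR for L1's - while
 * the window-exiting, TPR-shadow and MSR/IO-bitmap bits are recomputed as
 * described above.
 */
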
static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;

	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
		vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
		vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
		vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
		vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
		vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
		vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
		vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
		vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
		vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
		vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
		vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
		vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
		vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
		vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
		vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
		vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
		vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
		vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
		vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
		vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
		vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
		vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
		vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
		vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
		vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
		vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
		vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
		vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
		vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
		vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
		vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
		vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
		vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
		vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
		vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
		vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);

		vmx->segment_cache.bitmask = 0;
	}

	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
		vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
		vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
			    vmcs12->guest_pending_dbg_exceptions);
		vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
		vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);

		/*
		 * L1 may access the L2's PDPTRs, so save them to construct
		 * vmcs12.
		 */
		if (enable_ept) {
			vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
			vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
			vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
			vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
		}

		if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
		    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
			vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
	}

	if (nested_cpu_has_xsaves(vmcs12))
		vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);

	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
	 * doesn't care about page faults then we should set all of these to
	 * L1's desires. However, if L0 does care about (some) page faults, it
	 * is not easy (if at all possible?) to merge L0 and L1's desires, so
	 * we simply ask to exit on each and every L2 page fault. This is done
	 * by setting MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 */
	if (vmx_need_pf_intercept(&vmx->vcpu)) {
		/*
		 * TODO: if both L0 and L1 need the same MASK and MATCH,
		 * go ahead and use it?
		 */
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	} else {
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
	}

	if (cpu_has_vmx_apicv()) {
		vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
		vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
		vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
		vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
	}

	/*
	 * Make sure the msr_autostore list is up to date before we set the
	 * count in the vmcs02.
	 */
	prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	set_cr4_guest_host_mask(vmx);
}

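/*
 * Worked example for the PFEC logic above: when L0 intercepts all #PF
 * (e.g. !enable_ept), MASK = MATCH = 0 makes "PFEC & MASK == MATCH" hold
 * for every page fault, and EB.PF = 1 turns each one into an exit L0 can
 * filter in software. With EPT enabled and hypothetical L1 values
 * MASK = MATCH = 1 (exit only on faults with PFEC.P set), those values are
 * installed verbatim and hardware filters the exits on L1's behalf.
 */
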
/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
 * guest in a way that will both be appropriate to L1's requests, and our
 * needs. In addition to modifying the active vmcs (which is vmcs02), this
 * function also has additional necessary side-effects, like setting various
 * vcpu->arch fields.
 * Returns 0 on success, -EINVAL on failure. The invalid-state exit
 * qualification code is assigned to entry_failure_code on failure.
 */
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			  enum vm_entry_failure_code *entry_failure_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
	bool load_guest_pdptrs_vmcs12 = false;

	if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
		prepare_vmcs02_rare(vmx, vmcs12);
		vmx->nested.dirty_vmcs12 = false;

		load_guest_pdptrs_vmcs12 = !hv_evmcs ||
			!(hv_evmcs->hv_clean_fields &
			  HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
	}

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
	} else {
		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
	}
	if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
		vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
	vmx_set_rflags(vcpu, vmcs12->guest_rflags);

	/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
		vcpu->arch.pat = vmcs12->guest_ia32_pat;
	} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
	}

	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);

	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);

	if (nested_cpu_has_ept(vmcs12))
		nested_ept_init_mmu_context(vcpu);

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
	 * bits which we consider mandatory enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
	 * have more bits than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
	/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * Guest state is invalid and unrestricted guest is disabled,
	 * which means L1 attempted VMEntry to L2 with invalid state.
	 * Fail the VMEntry.
	 */
	if (CC(!vmx_guest_state_valid(vcpu))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/* Shadow page tables on either EPT or shadow page tables. */
	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
				entry_failure_code))
		return -EINVAL;

	/*
	 * Immediately write vmcs02.GUEST_CR3.  It will be propagated to vmcs12
	 * on nested VM-Exit, which can occur without actually running L2 and
	 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
	 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
	 * transition to HLT instead of running L2.
	 */
	if (enable_ept)
		vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);

	/* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
	if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
	    is_pae_paging(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
	}

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
				     vmcs12->guest_ia32_perf_global_ctrl)))
		return -EINVAL;

	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
	kvm_rip_write(vcpu, vmcs12->guest_rip);
	return 0;
}

static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
{
	if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
	       nested_cpu_has_virtual_nmis(vmcs12)))
		return -EINVAL;

	if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
	       nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
		return -EINVAL;

	return 0;
}

static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int maxphyaddr = cpuid_maxphyaddr(vcpu);

	/* Check for memory type validity */
	switch (new_eptp & VMX_EPTP_MT_MASK) {
	case VMX_EPTP_MT_UC:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
			return false;
		break;
	case VMX_EPTP_MT_WB:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
			return false;
		break;
	default:
		return false;
	}

	/* Page-walk levels validity. */
	switch (new_eptp & VMX_EPTP_PWL_MASK) {
	case VMX_EPTP_PWL_5:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
			return false;
		break;
	case VMX_EPTP_PWL_4:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
			return false;
		break;
	default:
		return false;
	}

	/* Reserved bits should not be set */
	if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f)))
		return false;

	/* AD, if set, should be supported */
	if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
			return false;
	}

	return true;
}

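/*
 * Illustrative example of a value that passes every check above on typical
 * hardware: a 4-level, write-back, A/D-enabled EPTP such as
 *
 *	(root_hpa & PAGE_MASK) | VMX_EPTP_PWL_4 | VMX_EPTP_MT_WB |
 *	VMX_EPTP_AD_ENABLE_BIT
 *
 * where root_hpa is a hypothetical page-aligned address below the guest's
 * MAXPHYADDR, leaving reserved bits 11:7 clear.
 */
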
/*
 * Checks related to VM-Execution Control Fields
 */
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
				   vmx->nested.msrs.pinbased_ctls_low,
				   vmx->nested.msrs.pinbased_ctls_high)) ||
	    CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
				   vmx->nested.msrs.procbased_ctls_low,
				   vmx->nested.msrs.procbased_ctls_high)))
		return -EINVAL;

	if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
	    CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
				   vmx->nested.msrs.secondary_ctls_low,
				   vmx->nested.msrs.secondary_ctls_high)))
		return -EINVAL;

	if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
	    nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
	    nested_vmx_check_nmi_controls(vmcs12) ||
	    nested_vmx_check_pml_controls(vcpu, vmcs12) ||
	    nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
	    nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
	    nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
	    CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
		return -EINVAL;

	if (!nested_cpu_has_preemption_timer(vmcs12) &&
	    nested_cpu_has_save_preemption_timer(vmcs12))
		return -EINVAL;

	if (nested_cpu_has_ept(vmcs12) &&
	    CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
		return -EINVAL;

	if (nested_cpu_has_vmfunc(vmcs12)) {
		if (CC(vmcs12->vm_function_control &
		       ~vmx->nested.msrs.vmfunc_controls))
			return -EINVAL;

		if (nested_cpu_has_eptp_switching(vmcs12)) {
			if (CC(!nested_cpu_has_ept(vmcs12)) ||
			    CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
				return -EINVAL;
		}
	}

	return 0;
}

/*
 * Checks related to VM-Exit Control Fields
 */
static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
				   vmx->nested.msrs.exit_ctls_low,
				   vmx->nested.msrs.exit_ctls_high)) ||
	    CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
		return -EINVAL;

	return 0;
}

/*
 * Checks related to VM-Entry Control Fields
 */
static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
				   vmx->nested.msrs.entry_ctls_low,
				   vmx->nested.msrs.entry_ctls_high)))
		return -EINVAL;

	/*
	 * From the Intel SDM, volume 3:
	 * Fields relevant to VM-entry event injection must be set properly.
	 * These fields are the VM-entry interruption-information field, the
	 * VM-entry exception error code, and the VM-entry instruction length.
	 */
	if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
		u32 intr_info = vmcs12->vm_entry_intr_info_field;
		u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
		u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
		bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
		bool should_have_error_code;
		bool urg = nested_cpu_has2(vmcs12,
					   SECONDARY_EXEC_UNRESTRICTED_GUEST);
		bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;

		/* VM-entry interruption-info field: interruption type */
		if (CC(intr_type == INTR_TYPE_RESERVED) ||
		    CC(intr_type == INTR_TYPE_OTHER_EVENT &&
		       !nested_cpu_supports_monitor_trap_flag(vcpu)))
			return -EINVAL;

		/* VM-entry interruption-info field: vector */
		if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
		    CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
		    CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
			return -EINVAL;

		/* VM-entry interruption-info field: deliver error code */
		should_have_error_code =
			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
			x86_exception_has_error_code(vector);
		if (CC(has_error_code != should_have_error_code))
			return -EINVAL;

		/* VM-entry exception error code */
		if (CC(has_error_code &&
		       vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
			return -EINVAL;

		/* VM-entry interruption-info field: reserved bits */
		if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
			return -EINVAL;

		/* VM-entry instruction length */
		switch (intr_type) {
		case INTR_TYPE_SOFT_EXCEPTION:
		case INTR_TYPE_SOFT_INTR:
		case INTR_TYPE_PRIV_SW_EXCEPTION:
			if (CC(vmcs12->vm_entry_instruction_len > 15) ||
			    CC(vmcs12->vm_entry_instruction_len == 0 &&
			       CC(!nested_cpu_has_zero_length_injection(vcpu))))
				return -EINVAL;
		}
	}

	if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
	    nested_check_vm_entry_controls(vcpu, vmcs12))
		return -EINVAL;

	if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
		return nested_evmcs_check_controls(vmcs12);

	return 0;
}

static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	bool ia32e;

	if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
	    CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
	    CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
		return -EINVAL;

	if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
		return -EINVAL;

	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
	    CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
		return -EINVAL;

	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
					   vmcs12->host_ia32_perf_global_ctrl)))
		return -EINVAL;

#ifdef CONFIG_X86_64
	ia32e = !!(vcpu->arch.efer & EFER_LMA);
#else
	ia32e = false;
#endif

	if (ia32e) {
		if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
		    CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
			return -EINVAL;
	} else {
		if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
		    CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
		    CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
		    CC((vmcs12->host_rip) >> 32))
			return -EINVAL;
	}

	if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_cs_selector == 0) ||
	    CC(vmcs12->host_tr_selector == 0) ||
	    CC(vmcs12->host_ss_selector == 0 && !ia32e))
		return -EINVAL;

	if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
		return -EINVAL;

	/*
	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
	 * the values of the LMA and LME bits in the field must each be that of
	 * the host address-space size VM-exit control.
	 */
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
		if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
			return -EINVAL;
	}

	return 0;
}

static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	int r = 0;
	struct vmcs12 *shadow;
	struct kvm_host_map map;

	if (vmcs12->vmcs_link_pointer == -1ull)
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
		return -EINVAL;

	if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
		return -EINVAL;

	shadow = map.hva;

	if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
	    CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
		r = -EINVAL;

	kvm_vcpu_unmap(vcpu, &map, false);
	return r;
}

/*
 * Checks related to Guest Non-register State
 */
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
{
	if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
	       vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT &&
	       vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12,
					enum vm_entry_failure_code *entry_failure_code)
{
	bool ia32e;

	*entry_failure_code = ENTRY_FAIL_DEFAULT;

	if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
	    CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
		return -EINVAL;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
	    CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
		return -EINVAL;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
	    CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
		return -EINVAL;

	if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
		*entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
		return -EINVAL;
	}

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
					   vmcs12->guest_ia32_perf_global_ctrl)))
		return -EINVAL;

	/*
	 * If the load IA32_EFER VM-entry control is 1, the following checks
	 * are performed on the field for the IA32_EFER MSR:
	 * - Bits reserved in the IA32_EFER MSR must be 0.
	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
	 *   the IA-32e mode guest VM-exit control. It must also be identical
	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
	 *   CR0.PG) is 1.
	 */
	if (to_vmx(vcpu)->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
		if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
		    CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
		    CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
			return -EINVAL;
	}

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
	    (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
	     CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
		return -EINVAL;

	if (nested_check_guest_non_reg_state(vmcs12))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long cr3, cr4;
	bool vm_fail;

	if (!nested_early_check)
		return 0;

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	preempt_disable();

	vmx_prepare_switch_to_guest(vcpu);

	/*
	 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
	 * which is reserved to '1' by hardware.  GUEST_RFLAGS is guaranteed to
	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
	 * there is no need to preserve other bits or save/restore the field.
	 */
	vmcs_writel(GUEST_RFLAGS, 0);

	cr3 = __get_current_cr3_fast();
	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
		vmcs_writel(HOST_CR3, cr3);
		vmx->loaded_vmcs->host_state.cr3 = cr3;
	}

	cr4 = cr4_read_shadow();
	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
		vmcs_writel(HOST_CR4, cr4);
		vmx->loaded_vmcs->host_state.cr4 = cr4;
	}

	asm(
		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
		"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"je 1f \n\t"
		__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
		"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"1: \n\t"
		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */

		/* Check if vmlaunch or vmresume is needed */
		"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"

		/*
		 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
		 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
		 * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
		 * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
		 */
		"call vmx_vmenter\n\t"

		CC_SET(be)
	      : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
	      :	[HOST_RSP]"r"((unsigned long)HOST_RSP),
		[loaded_vmcs]"r"(vmx->loaded_vmcs),
		[launched]"i"(offsetof(struct loaded_vmcs, launched)),
		[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
		[wordsize]"i"(sizeof(ulong))
	      : "memory"
	);

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	if (vm_fail) {
		u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);

		preempt_enable();

		trace_kvm_nested_vmenter_failed(
			"early hardware check VM-instruction error: ", error);
		WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	/*
	 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
	 */
	local_irq_enable();
	if (hw_breakpoint_active())
		set_debugreg(__this_cpu_read(cpu_dr7), 7);
	preempt_enable();

	/*
	 * A non-failing VMEntry means we somehow entered guest mode with
	 * an illegal RIP, and that's just the tip of the iceberg.  There
	 * is no telling what memory has been modified or what state has
	 * been exposed to unknown code.  Hitting this all but guarantees
	 * a (very critical) hardware issue.
	 */
	WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
		VMX_EXIT_REASONS_FAILED_VMENTRY));

	return 0;
}

static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_host_map *map;
	struct page *page;
	u64 hpa;

	/*
	 * hv_evmcs may end up being not mapped after migration (when
	 * L2 was running), map it here to make sure vmcs12 changes are
	 * properly reflected.
	 */
	if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) {
		enum nested_evmptrld_status evmptrld_status =
			nested_vmx_handle_enlightened_vmptrld(vcpu, false);

		if (evmptrld_status == EVMPTRLD_VMFAIL ||
		    evmptrld_status == EVMPTRLD_ERROR) {
			pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
					     __func__);
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			vcpu->run->internal.suberror =
				KVM_INTERNAL_ERROR_EMULATION;
			vcpu->run->internal.ndata = 0;
			return false;
		}
	}

	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		/*
		 * Translate L1 physical address to host physical
		 * address for vmcs02. Keep the page pinned, so this
		 * physical address remains valid. We keep a reference
		 * to it so we can release it later.
		 */
		if (vmx->nested.apic_access_page) { /* shouldn't happen */
			kvm_release_page_clean(vmx->nested.apic_access_page);
			vmx->nested.apic_access_page = NULL;
		}
		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
		if (!is_error_page(page)) {
			vmx->nested.apic_access_page = page;
			hpa = page_to_phys(vmx->nested.apic_access_page);
			vmcs_write64(APIC_ACCESS_ADDR, hpa);
		} else {
			pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
					     __func__);
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			vcpu->run->internal.suberror =
				KVM_INTERNAL_ERROR_EMULATION;
			vcpu->run->internal.ndata = 0;
			return false;
		}
	}

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		map = &vmx->nested.virtual_apic_map;

		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
		} else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
			   nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
			   !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
			/*
			 * The processor will never use the TPR shadow, simply
			 * clear the bit from the execution control.  Such a
			 * configuration is useless, but it happens in tests.
			 * For any other configuration, failing the vm entry is
			 * _not_ what the processor does but it's basically the
			 * only possibility we have.
			 */
			exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
		} else {
			/*
			 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
			 * force VM-Entry to fail.
			 */
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
		}
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		map = &vmx->nested.pi_desc_map;

		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
			vmx->nested.pi_desc =
				(struct pi_desc *)(((void *)map->hva) +
				offset_in_page(vmcs12->posted_intr_desc_addr));
			vmcs_write64(POSTED_INTR_DESC_ADDR,
				     pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
		}
	}
	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
		exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
	else
		exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);

	return true;
}

static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t dst;

	if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
		return 0;

	if (WARN_ON_ONCE(vmx->nested.pml_full))
		return 1;

	/*
	 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
	 * set is already checked as part of A/D emulation.
	 */
	vmcs12 = get_vmcs12(vcpu);
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
		vmx->nested.pml_full = true;
		return 1;
	}

	gpa &= ~0xFFFull;
	dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;

	if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
				 offset_in_page(dst), sizeof(gpa)))
		return 0;

	vmcs12->guest_pml_index--;

	return 0;
}

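/*
 * Worked example with hypothetical values: with vmcs12->pml_address =
 * 0x10000 and guest_pml_index = 511 (the freshly reset value), the GPA is
 * logged at
 *
 *	dst = 0x10000 + 8 * 511 = 0x10ff8
 *
 * and the index then steps down toward 0, mirroring how hardware fills the
 * PML buffer from the end.
 */
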
/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for running VMX instructions (except VMXON, whose prerequisites are
 * slightly different). It also specifies what exception to inject otherwise.
 * Note that many of these exceptions have priority over VM exits, so they
 * don't have to be checked again here.
 */
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	if (!to_vmx(vcpu)->nested.vmxon) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 0;
	}

	return 1;
}

static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
{
	u8 rvi = vmx_get_rvi();
	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);

	return ((rvi & 0xf0) > (vppr & 0xf0));
}

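/*
 * Illustrative example: only the priority class (the upper nibble) is
 * compared, as in the SDM's evaluation of pending virtual interrupts.
 * With RVI = 0x51 (vector 0x51, class 5) and VPPR = 0x30 (class 3),
 * 0x50 > 0x30 and a virtual interrupt is deliverable; with VPPR = 0x60
 * the vector is masked by the processor priority and this helper
 * returns 0.
 */
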
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12);

/*
 * If from_vmentry is false, this is being called from state restore (either RSM
 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
 *
 * Returns:
 *	NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
 *	NVMX_VMENTRY_VMFAIL:  Consistency check VMFail
 *	NVMX_VMENTRY_VMEXIT:  Consistency check VMExit
 *	NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
 */
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
							bool from_vmentry)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	enum vm_entry_failure_code entry_failure_code;
	bool evaluate_pending_interrupts;
	u32 exit_reason, failed_index;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_vcpu_flush_tlb_current(vcpu);

	evaluate_pending_interrupts = exec_controls_get(vmx) &
		(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);

	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	if (kvm_mpx_supported() &&
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
		vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

	/*
	 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
	 * nested early checks are disabled. In the event of a "late" VM-Fail,
	 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
	 * software model to the pre-VMEntry host state. When EPT is disabled,
	 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
	 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
	 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
	 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
	 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
	 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
	 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
	 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
	 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
	 * path would need to manually save/restore vmcs01.GUEST_CR3.
	 */
	if (!enable_ept && !nested_early_check)
		vmcs_writel(GUEST_CR3, vcpu->arch.cr3);

	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);

	prepare_vmcs02_early(vmx, vmcs12);

	if (from_vmentry) {
		if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
			return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
		}

		if (nested_vmx_check_vmentry_hw(vcpu)) {
			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
			return NVMX_VMENTRY_VMFAIL;
		}

		if (nested_vmx_check_guest_state(vcpu, vmcs12,
						 &entry_failure_code)) {
			exit_reason = EXIT_REASON_INVALID_STATE;
			vmcs12->exit_qualification = entry_failure_code;
			goto vmentry_fail_vmexit;
		}
	}

	enter_guest_mode(vcpu);
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
		vcpu->arch.tsc_offset += vmcs12->tsc_offset;

	if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
		exit_reason = EXIT_REASON_INVALID_STATE;
		vmcs12->exit_qualification = entry_failure_code;
		goto vmentry_fail_vmexit_guest_mode;
	}

	if (from_vmentry) {
		failed_index = nested_vmx_load_msr(vcpu,
						   vmcs12->vm_entry_msr_load_addr,
						   vmcs12->vm_entry_msr_load_count);
		if (failed_index) {
			exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
			vmcs12->exit_qualification = failed_index;
			goto vmentry_fail_vmexit_guest_mode;
		}
	} else {
		/*
		 * The MMU is not initialized to point at the right entities yet and
		 * "get pages" would need to read data from the guest (i.e. we will
		 * need to perform gpa to hpa translation). Request a call
		 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
		 * have already been set at vmentry time and should not be reset.
		 */
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	}

	/*
	 * If L1 had a pending IRQ/NMI until it executed
	 * VMLAUNCH/VMRESUME which wasn't delivered because it was
	 * disallowed (e.g. interrupts disabled), L0 needs to
	 * evaluate if this pending event should cause an exit from L2
	 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
	 * intercept EXTERNAL_INTERRUPT).
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request, or checking RVI during evaluation of
	 * pending virtual interrupts. However, this setting was done
	 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
	 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
	 */
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, vcpu);

	/*
	 * Do not start the preemption timer hrtimer until after we know
	 * we are successful, so that only nested_vmx_vmexit needs to cancel
	 * the timer.
	 */
	vmx->nested.preemption_timer_expired = false;
	if (nested_cpu_has_preemption_timer(vmcs12)) {
		u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
		vmx_start_preemption_timer(vcpu, timer_value);
	}

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return NVMX_VMENTRY_SUCCESS;

	/*
	 * A failed consistency check that leads to a VMExit during L1's
	 * VMEnter to L2 is a variation of a normal VMexit, as explained in
	 * 26.7 "VM-entry failures during or after loading guest state".
	 */
vmentry_fail_vmexit_guest_mode:
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
	leave_guest_mode(vcpu);

vmentry_fail_vmexit:
	vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!from_vmentry)
		return NVMX_VMENTRY_VMEXIT;

	load_vmcs12_host_state(vcpu, vmcs12);
	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
	if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	return NVMX_VMENTRY_VMEXIT;
}

/*
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
	struct vmcs12 *vmcs12;
	enum nvmx_vmentry_status status;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
	enum nested_evmptrld_status evmptrld_status;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
	if (evmptrld_status == EVMPTRLD_ERROR) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	} else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) {
		return nested_vmx_failInvalid(vcpu);
	}

	if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull))
		return nested_vmx_failInvalid(vcpu);

	vmcs12 = get_vmcs12(vcpu);

	/*
	 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
	 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
	 * rather than RFLAGS.ZF, and no error number is stored to the
	 * VM-instruction error field.
	 */
	if (CC(vmcs12->hdr.shadow_vmcs))
		return nested_vmx_failInvalid(vcpu);

	if (vmx->nested.hv_evmcs) {
		copy_enlightened_to_vmcs12(vmx);
		/* Enlightened VMCS doesn't have launch state */
		vmcs12->launch_state = !launch;
	} else if (enable_shadow_vmcs) {
		copy_shadow_to_vmcs12(vmx);
	}

	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, acting appropriately when
	 * they fail: as the SDM explains, some conditions should cause the
	 * instruction to fail, while others will cause the instruction to seem
	 * to succeed, but return an EXIT_REASON_INVALID_STATE.
	 * To speed up the normal (success) code path, we should avoid checking
	 * for misconfigurations which will anyway be caught by the processor
	 * when using the merged vmcs02.
	 */
	if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
		return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);

	if (CC(vmcs12->launch_state == launch))
		return nested_vmx_fail(vcpu,
			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);

	if (nested_vmx_check_controls(vcpu, vmcs12))
		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	if (nested_vmx_check_host_state(vcpu, vmcs12))
		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);

	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */
	vmx->nested.nested_run_pending = 1;
	vmx->nested.has_preemption_timer_deadline = false;
	status = nested_vmx_enter_non_root_mode(vcpu, true);
	if (unlikely(status != NVMX_VMENTRY_SUCCESS))
		goto vmentry_failed;

	/* Emulate processing of posted interrupts on VM-Enter. */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
		vmx->nested.pi_pending = true;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
	}

	/* Hide L1D cache contents from the nested guest.  */
	vmx->vcpu.arch.l1tf_flush_l1d = true;

	/*
	 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
	 * also be used as part of restoring nVMX state for
	 * snapshot restore (migration).
	 *
	 * In this flow, it is assumed that vmcs12 cache was
	 * transferred as part of captured nVMX state and should
	 * therefore not be read from guest memory (which may not
	 * exist on destination host yet).
	 */
	nested_cache_shadow_vmcs12(vcpu, vmcs12);

	switch (vmcs12->guest_activity_state) {
	case GUEST_ACTIVITY_HLT:
		/*
		 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
		 * awakened by event injection or by an NMI-window VM-exit or
		 * by an interrupt-window VM-exit, halt the vcpu.
		 */
		if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
		    !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) &&
		    !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
		      (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
			vmx->nested.nested_run_pending = 0;
			return kvm_vcpu_halt(vcpu);
		}
		break;
	case GUEST_ACTIVITY_WAIT_SIPI:
		vmx->nested.nested_run_pending = 0;
		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		break;
	default:
		break;
	}

	return 1;

vmentry_failed:
	vmx->nested.nested_run_pending = 0;
	if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
		return 0;
	if (status == NVMX_VMENTRY_VMEXIT)
		return 1;
	WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
	return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
}

/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *     didn't trap the bit, because if L1 did, so would L0).
 *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *     been modified by L2, and L1 knows it. So just leave the old value of
 *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *     isn't relevant, because if L0 traps this bit it can set it to anything.
 *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *     changed these bits, and therefore they need to be updated, but L0
 *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
 */
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
	/*3*/	(vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
			vcpu->arch.cr0_guest_owned_bits));
}
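
/*
 * Illustration (hypothetical masks, not values computed by this file): if
 * vcpu->arch.cr0_guest_owned_bits == 0x8 (only CR0.TS owned by the guest)
 * and vmcs12->cr0_guest_host_mask == 0x1 (L1 traps CR0.PE), then case 1
 * selects TS from vmcs02 GUEST_CR0, case 2 selects PE from
 * vmcs12->guest_cr0, and case 3 selects every remaining bit from vmcs02
 * CR0_READ_SHADOW. The three masks are disjoint by construction, so the
 * OR above acts as a bit-wise three-way select.
 */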
static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
	/*3*/	(vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
			vcpu->arch.cr4_guest_owned_bits));
}
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
				      struct vmcs12 *vmcs12)
{
	u32 idt_vectoring;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (kvm_exception_is_soft(nr)) {
			vmcs12->vm_exit_instruction_len =
				vcpu->arch.event_exit_inst_len;
			idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
		} else
			idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;

		if (vcpu->arch.exception.has_error_code) {
			idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
			vmcs12->idt_vectoring_error_code =
				vcpu->arch.exception.error_code;
		}

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	} else if (vcpu->arch.nmi_injected) {
		vmcs12->idt_vectoring_info_field =
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (vcpu->arch.interrupt.soft) {
			idt_vectoring |= INTR_TYPE_SOFT_INTR;
			vmcs12->vm_entry_instruction_len =
				vcpu->arch.event_exit_inst_len;
		} else
			idt_vectoring |= INTR_TYPE_EXT_INTR;

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	}
}
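
/*
 * For reference, the IDT-vectoring info built above follows the VM-exit
 * interruption-information layout: vector in bits 7:0, type in bits 10:8,
 * deliver-error-code in bit 11, valid in bit 31. E.g. a soft interrupt
 * from "int $0x80" would be encoded (hypothetical value) as
 * 0x80 | INTR_TYPE_SOFT_INTR | VECTORING_INFO_VALID_MASK == 0x80000480.
 */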
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	gfn_t gfn;

	/*
	 * Don't need to mark the APIC access page dirty; it is never
	 * written to by the CPU during APIC virtualization.
	 */

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}
}
static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int max_irr;
	void *vapic_page;
	u16 status;

	if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
		return;

	vmx->nested.pi_pending = false;
	if (!pi_test_and_clear_on(vmx->nested.pi_desc))
		return;

	max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
	if (max_irr != 256) {
		vapic_page = vmx->nested.virtual_apic_map.hva;
		if (!vapic_page)
			return;

		__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
			vapic_page, &max_irr);
		status = vmcs_read16(GUEST_INTR_STATUS);
		if ((u8)max_irr > ((u8)status & 0xff)) {
			status &= ~0xff;
			status |= (u8)max_irr;
			vmcs_write16(GUEST_INTR_STATUS, status);
		}
	}

	nested_mark_vmcs12_pages_dirty(vcpu);
}
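
/*
 * Note on the scan above: pi_desc->pir is a 256-bit bitmap with one bit per
 * interrupt vector, so find_last_bit() returns 256 when the PIR is empty and
 * otherwise yields the highest pending vector. GUEST_INTR_STATUS keeps RVI
 * in its low byte (SVI in the high byte); the low byte is only ever raised,
 * never lowered, when merging in max_irr.
 */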
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
					       unsigned long exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (vcpu->arch.exception.has_error_code) {
		vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (kvm_exception_is_soft(nr))
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
	    vmx_get_nmi_mask(vcpu))
		intr_info |= INTR_INFO_UNBLOCK_NMI;

	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
}
/*
 * Returns true if a debug trap is pending delivery.
 *
 * In KVM, debug traps bear an exception payload. As such, the class of a #DB
 * exception may be inferred from the presence of an exception payload.
 */
static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending &&
	       vcpu->arch.exception.nr == DB_VECTOR &&
	       vcpu->arch.exception.payload;
}
/*
 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
 * represents these debug traps with a payload that is said to be compatible
 * with the 'pending debug exceptions' field, write the payload to the VMCS
 * field if a VM-exit is delivered before the debug trap.
 */
static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
{
	if (vmx_pending_dbg_trap(vcpu))
		vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
			    vcpu->arch.exception.payload);
}
static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
	       to_vmx(vcpu)->nested.preemption_timer_expired;
}
static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qual;
	bool block_nested_events =
	    vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
	bool mtf_pending = vmx->nested.mtf_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	/*
	 * Clear the MTF state. If a higher priority VM-exit is delivered first,
	 * this state is discarded.
	 */
	if (!block_nested_events)
		vmx->nested.mtf_pending = false;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_update_pending_dbg(vcpu);
		clear_bit(KVM_APIC_INIT, &apic->pending_events);
		if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
			nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
		return 0;
	}

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_SIPI, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;

		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
			nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0,
					  apic->sipi_vector & 0xFFUL);
		return 0;
	}

	/*
	 * Process any exceptions that are not debug traps before MTF.
	 */
	if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_vmx_check_exception(vcpu, &exit_qual))
			goto no_vmexit;
		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
		return 0;
	}

	if (mtf_pending) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_update_pending_dbg(vcpu);
		nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_vmx_check_exception(vcpu, &exit_qual))
			goto no_vmexit;
		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
		return 0;
	}

	if (nested_vmx_preemption_timer_pending(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
		return 0;
	}

	if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		goto no_vmexit;
	}

	if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(vcpu))
			goto no_vmexit;

		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
				  INTR_INFO_VALID_MASK, 0);
		/*
		 * The NMI-triggered VM exit counts as injection:
		 * clear this one and block further NMIs.
		 */
		vcpu->arch.nmi_pending = 0;
		vmx_set_nmi_mask(vcpu, true);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(vcpu))
			goto no_vmexit;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
		return 0;
	}

no_vmexit:
	vmx_complete_nested_posted_interrupt(vcpu);
	return 0;
}
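
/*
 * The ordering of the checks above mirrors (an approximation of) the
 * architectural event priority at VM-entry/VM-exit: INIT and SIPI first,
 * then exceptions that are not debug traps, MTF, debug-trap exceptions,
 * the VMX-preemption timer, SMI, NMI and finally external interrupts.
 * Reordering the blocks would change which event L1 observes first.
 */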
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	ktime_t remaining =
		hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
	u64 value;

	if (ktime_to_ns(remaining) <= 0)
		return 0;

	value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
	do_div(value, 1000000);
	return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}
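
/*
 * Worked example for the conversion above (hypothetical numbers): with 1 ms
 * remaining and virtual_tsc_khz == 2000000 (a 2 GHz guest TSC),
 * value = 1e6 ns * 2e6 / 1e6 = 2e6 TSC cycles; shifting right by the
 * emulated rate (5) gives 62500 preemption-timer ticks, matching the
 * advertised one-tick-per-32-TSC-cycles ratio.
 */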
static bool is_vmcs12_ext_field(unsigned long field)
{
	switch (field) {
	case GUEST_ES_SELECTOR:
	case GUEST_CS_SELECTOR:
	case GUEST_SS_SELECTOR:
	case GUEST_DS_SELECTOR:
	case GUEST_FS_SELECTOR:
	case GUEST_GS_SELECTOR:
	case GUEST_LDTR_SELECTOR:
	case GUEST_TR_SELECTOR:
	case GUEST_ES_LIMIT:
	case GUEST_CS_LIMIT:
	case GUEST_SS_LIMIT:
	case GUEST_DS_LIMIT:
	case GUEST_FS_LIMIT:
	case GUEST_GS_LIMIT:
	case GUEST_LDTR_LIMIT:
	case GUEST_TR_LIMIT:
	case GUEST_GDTR_LIMIT:
	case GUEST_IDTR_LIMIT:
	case GUEST_ES_AR_BYTES:
	case GUEST_DS_AR_BYTES:
	case GUEST_FS_AR_BYTES:
	case GUEST_GS_AR_BYTES:
	case GUEST_LDTR_AR_BYTES:
	case GUEST_TR_AR_BYTES:
	case GUEST_ES_BASE:
	case GUEST_CS_BASE:
	case GUEST_SS_BASE:
	case GUEST_DS_BASE:
	case GUEST_FS_BASE:
	case GUEST_GS_BASE:
	case GUEST_LDTR_BASE:
	case GUEST_TR_BASE:
	case GUEST_GDTR_BASE:
	case GUEST_IDTR_BASE:
	case GUEST_PENDING_DBG_EXCEPTIONS:
	case GUEST_BNDCFGS:
		return true;
	default:
		break;
	}

	return false;
}
static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
	vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
	vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
	vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
	vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
	vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
	vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
	vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
	vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
	vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
	vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
	vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
	vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
	vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
	vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
	vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
	vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
	vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
	vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
	vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
	vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
	vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
	vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
	vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
	vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
	vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
	vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
	vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
	vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
	vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
	vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
	vmcs12->guest_pending_dbg_exceptions =
		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
	if (kvm_mpx_supported())
		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

	vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
}
static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
		return;

	WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);

	cpu = get_cpu();
	vmx->loaded_vmcs = &vmx->nested.vmcs02;
	vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);

	sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	vmx->loaded_vmcs = &vmx->vmcs01;
	vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
	put_cpu();
}
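
/*
 * The juggling above is needed because vmcs_read*() always operates on the
 * CPU's current VMCS: this path runs with vmcs01 loaded, so vmcs02 is
 * temporarily made current (with preemption disabled via get_cpu()) for
 * sync_vmcs02_to_vmcs12_rare(), and vmcs01 is restored afterwards.
 */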
/*
 * Update the guest state fields of vmcs12 to reflect changes that
 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
 * VM-entry controls is also updated, since this is really a guest
 * state bit.)
 */
static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->nested.hv_evmcs)
		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;

	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);

	vmcs12->guest_rsp = kvm_rsp_read(vcpu);
	vmcs12->guest_rip = kvm_rip_read(vcpu);
	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);

	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
	vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);

	vmcs12->guest_interruptibility_info =
		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);

	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
		vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
	else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
		vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI;
	else
		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;

	if (nested_cpu_has_preemption_timer(vmcs12) &&
	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
	    !vmx->nested.nested_run_pending)
		vmcs12->vmx_preemption_timer_value =
			vmx_get_preemption_timer_value(vcpu);

	/*
	 * In some cases (usually, nested EPT), L2 is allowed to change its
	 * own CR3 without exiting. If it has changed it, we must keep it.
	 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
	 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 *
	 * Additionally, restore L2's PDPTR to vmcs12.
	 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
		if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
			vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
			vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
			vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
			vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
		}
	}

	vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);

	if (nested_cpu_has_vid(vmcs12))
		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
		vmcs12->guest_ia32_efer = vcpu->arch.efer;
}
/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have changed by the L2 guest or the exit - i.e., the guest-state and
 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
 * which already writes to vmcs12 directly.
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 vm_exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update exit information fields: */
	vmcs12->vm_exit_reason = vm_exit_reason;
	vmcs12->exit_qualification = exit_qualification;
	vmcs12->vm_exit_intr_info = exit_intr_info;

	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		vmcs12->launch_state = 1;

		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);

		/*
		 * According to spec, there's no need to store the guest's
		 * MSRs if the exit is due to a VM-entry failure that occurs
		 * during or after loading the guest state. Since this exit
		 * does not fall in that category, we need to save the MSRs.
		 */
		if (nested_vmx_store_msr(vcpu,
					 vmcs12->vm_exit_msr_store_addr,
					 vmcs12->vm_exit_msr_store_count))
			nested_vmx_abort(vcpu,
					 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}
/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12)
{
	enum vm_entry_failure_code ignored;
	struct kvm_segment seg;

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->host_ia32_efer;
	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	vmx_set_efer(vcpu, vcpu->arch.efer);

	kvm_rsp_write(vcpu, vmcs12->host_rsp);
	kvm_rip_write(vcpu, vmcs12->host_rip);
	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
	vmx_set_interrupt_shadow(vcpu, 0);

	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because vmx_set_cr0 refers to efer set above.
	 *
	 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it);
	 */
	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
	vmx_set_cr0(vcpu, vmcs12->host_cr0);

	/* Same as above - no reason to call set_cr4_guest_host_mask(). */
	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	vmx_set_cr4(vcpu, vmcs12->host_cr4);

	nested_ept_uninit_mmu_context(vcpu);

	/*
	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
	 * couldn't have changed.
	 */
	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);

	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;

	nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);

	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);

	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
		vmcs_write64(GUEST_BNDCFGS, 0);

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
		vcpu->arch.pat = vmcs12->host_ia32_pat;
	}
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
					 vmcs12->host_ia32_perf_global_ctrl));

	/* Set L1 segment info according to Intel SDM
	   27.5.2 Loading Host Segment and Descriptor-Table Registers */
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.selector = vmcs12->host_cs_selector,
		.type = 11,
		.present = 1,
		.s = 1,
		.g = 1
	};
	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		seg.l = 1;
	else
		seg.db = 1;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.type = 3,
		.present = 1,
		.db = 1,
		.s = 1,
		.g = 1
	};
	seg.selector = vmcs12->host_ds_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
	seg.selector = vmcs12->host_es_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
	seg.selector = vmcs12->host_ss_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
	seg.selector = vmcs12->host_fs_selector;
	seg.base = vmcs12->host_fs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
	seg.selector = vmcs12->host_gs_selector;
	seg.base = vmcs12->host_gs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
	seg = (struct kvm_segment) {
		.base = vmcs12->host_tr_base,
		.limit = 0x67,
		.selector = vmcs12->host_tr_selector,
		.type = 11,
		.present = 1
	};
	vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);

	kvm_set_dr(vcpu, 7, 0x400);
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	if (cpu_has_vmx_msr_bitmap())
		vmx_update_msr_bitmap(vcpu);

	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
				vmcs12->vm_exit_msr_load_count))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}
static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
{
	struct vmx_uret_msr *efer_msr;
	unsigned int i;

	if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
		return vmcs_read64(GUEST_IA32_EFER);

	if (cpu_has_load_ia32_efer())
		return host_efer;

	for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
		if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
			return vmx->msr_autoload.guest.val[i].value;
	}

	efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
	if (efer_msr)
		return efer_msr->data;

	return host_efer;
}
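
/*
 * Summary of the fallback order above: prefer the architected GUEST_IA32_EFER
 * field when vmcs01 loads EFER on entry, then the MSR autoload list, then the
 * userspace-return MSR slot, and finally assume L1 runs with the host's EFER.
 */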
static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msr_entry g, h;
	gpa_t gpa;
	u32 i, j;

	vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
		/*
		 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
		 * as vmcs01.GUEST_DR7 contains a userspace defined value
		 * and vcpu->arch.dr7 is not squirreled away before the
		 * nested VMENTER (not worth adding a variable in nested_vmx).
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			kvm_set_dr(vcpu, 7, DR7_FIXED_1);
		else
			WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
	}

	/*
	 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
	 * handle a variety of side effects to KVM's software model.
	 */
	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));

	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
	vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));

	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));

	nested_ept_uninit_mmu_context(vcpu);
	vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	/*
	 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
	 * from vmcs01 (if necessary). The PDPTRs are not loaded on
	 * VMFail, like everything else we just need to ensure our
	 * software model is up-to-date.
	 */
	if (enable_ept && is_pae_paging(vcpu))
		ept_save_pdptrs(vcpu);

	kvm_mmu_reset_context(vcpu);

	if (cpu_has_vmx_msr_bitmap())
		vmx_update_msr_bitmap(vcpu);

	/*
	 * This nasty bit of open coding is a compromise between blindly
	 * loading L1's MSRs using the exit load lists (incorrect emulation
	 * of VMFail), leaving the nested VM's MSRs in the software model
	 * (incorrect behavior) and snapshotting the modified MSRs (too
	 * expensive since the lists are unbound by hardware). For each
	 * MSR that was (prematurely) loaded from the nested VMEntry load
	 * list, reload it from the exit load list if it exists and differs
	 * from the guest value. The intent is to stuff host state as
	 * silently as possible, not to fully process the exit load list.
	 */
	for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
		gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
		if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
			pr_debug_ratelimited(
				"%s read MSR index failed (%u, 0x%08llx)\n",
				__func__, i, gpa);
			goto vmabort;
		}

		for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
			gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
			if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
				pr_debug_ratelimited(
					"%s read MSR failed (%u, 0x%08llx)\n",
					__func__, j, gpa);
				goto vmabort;
			}
			if (h.index != g.index)
				continue;
			if (h.value == g.value)
				break;

			if (nested_vmx_load_msr_check(vcpu, &h)) {
				pr_debug_ratelimited(
					"%s check failed (%u, 0x%x, 0x%x)\n",
					__func__, j, h.index, h.reserved);
				goto vmabort;
			}

			if (kvm_set_msr(vcpu, h.index, h.value)) {
				pr_debug_ratelimited(
					"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
					__func__, j, h.index, h.value);
				goto vmabort;
			}

			break;
		}
	}

	return;

vmabort:
	nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}
/*
 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
 */
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* trying to cancel vmlaunch/vmresume is a bug */
	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* Service the TLB flush request for L2 before switching to L1. */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_vcpu_flush_tlb_current(vcpu);

	/*
	 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
	 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
	 * up-to-date before switching to L1.
	 */
	if (enable_ept && is_pae_paging(vcpu))
		vmx_ept_load_pdptrs(vcpu);

	leave_guest_mode(vcpu);

	if (nested_cpu_has_preemption_timer(vmcs12))
		hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);

	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;

	if (likely(!vmx->fail)) {
		sync_vmcs02_to_vmcs12(vcpu, vmcs12);

		if (vm_exit_reason != -1)
			prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
				       exit_intr_info, exit_qualification);

		/*
		 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
		 * also be used to capture vmcs12 cache as part of
		 * capturing nVMX state for snapshot (migration).
		 *
		 * Otherwise, this flush will dirty guest memory at a
		 * point it is already assumed by user-space to be
		 * immutable.
		 */
		nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
	} else {
		/*
		 * The only expected VM-instruction error is "VM entry with
		 * invalid control field(s)." Anything else indicates a
		 * problem with L0. And we should never get here with a
		 * VMFail of any type if early consistency checks are enabled.
		 */
		WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
			     VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		WARN_ON_ONCE(nested_early_check);
	}

	vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	/* Update any VMCS fields that might have changed while L2 ran */
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
	if (vmx->nested.l1_tpr_threshold != -1)
		vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);

	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (vmx->nested.change_vmcs01_virtual_apic_mode) {
		vmx->nested.change_vmcs01_virtual_apic_mode = false;
		vmx_set_virtual_apic_mode(vcpu);
	}

	/* Unpin physical memory we referred to in vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	if (vmx->nested.reload_vmcs01_apic_access_page) {
		vmx->nested.reload_vmcs01_apic_access_page = false;
		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
	}

	if ((vm_exit_reason != -1) &&
	    (enable_shadow_vmcs || vmx->nested.hv_evmcs))
		vmx->nested.need_vmcs12_to_shadow_sync = true;

	/* in case we halted in L2 */
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	if (likely(!vmx->fail)) {
		if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
		    nested_exit_intr_ack_set(vcpu)) {
			int irq = kvm_cpu_get_interrupt(vcpu);
			WARN_ON(irq < 0);
			vmcs12->vm_exit_intr_info = irq |
				INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
		}

		if (vm_exit_reason != -1)
			trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
						       vmcs12->exit_qualification,
						       vmcs12->idt_vectoring_info_field,
						       vmcs12->vm_exit_intr_info,
						       vmcs12->vm_exit_intr_error_code,
						       KVM_ISA_VMX);

		load_vmcs12_host_state(vcpu, vmcs12);

		return;
	}

	/*
	 * After an early L2 VM-entry failure, we're now back
	 * in L1 which thinks it just finished a VMLAUNCH or
	 * VMRESUME instruction, so we need to set the failure
	 * flag and the VM-instruction error field of the VMCS
	 * accordingly, and skip the emulated instruction.
	 */
	(void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	/*
	 * Restore L1's host state to KVM's software model. We're here
	 * because a consistency check was caught by hardware, which
	 * means some amount of guest state has been propagated to KVM's
	 * model and needs to be unwound to the host's state.
	 */
	nested_vmx_restore_host_state(vcpu);

	vmx->fail = 0;
}
/*
 * Decode the memory-address operand of a vmx instruction, as recorded on an
 * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and throws
 * #UD, #GP, or #SS.
 */
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
{
	gva_t off;
	bool exn;
	struct kvm_segment s;

	/*
	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
	 * Execution", on an exit, vmx_instruction_info holds most of the
	 * addressing components of the operand. Only the displacement part
	 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
	 * For how an actual address is calculated from all these components,
	 * refer to Vol. 1, "Operand Addressing".
	 */
	int  scaling = vmx_instruction_info & 3;
	int  addr_size = (vmx_instruction_info >> 7) & 7;
	bool is_reg = vmx_instruction_info & (1u << 10);
	int  seg_reg = (vmx_instruction_info >> 15) & 7;
	int  index_reg = (vmx_instruction_info >> 18) & 0xf;
	bool index_is_valid = !(vmx_instruction_info & (1u << 22));
	int  base_reg = (vmx_instruction_info >> 23) & 0xf;
	bool base_is_valid = !(vmx_instruction_info & (1u << 27));

	if (is_reg) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* Addr = segment_base + offset */
	/* offset = base + [index * scale] + displacement */
	off = exit_qualification; /* holds the displacement */
	if (addr_size == 1)
		off = (gva_t)sign_extend64(off, 31);
	else if (addr_size == 0)
		off = (gva_t)sign_extend64(off, 15);
	if (base_is_valid)
		off += kvm_register_read(vcpu, base_reg);
	if (index_is_valid)
		off += kvm_register_read(vcpu, index_reg) << scaling;
	vmx_get_segment(vcpu, &s, seg_reg);

	/*
	 * The effective address, i.e. @off, of a memory operand is truncated
	 * based on the address size of the instruction. Note that this is
	 * the *effective address*, i.e. the address prior to accounting for
	 * the segment's base.
	 */
	if (addr_size == 1) /* 32 bit */
		off &= 0xffffffff;
	else if (addr_size == 0) /* 16 bit */
		off &= 0xffff;

	/* Checks for #GP/#SS exceptions. */
	exn = false;
	if (is_long_mode(vcpu)) {
		/*
		 * The virtual/linear address is never truncated in 64-bit
		 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
		 * address when using FS/GS with a non-zero base.
		 */
		if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
			*ret = s.base + off;
		else
			*ret = off;

		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
		 * non-canonical form. This is the only check on the memory
		 * destination for long mode!
		 */
		exn = is_noncanonical_address(*ret, vcpu);
	} else {
		/*
		 * When not in long mode, the virtual/linear address is
		 * unconditionally truncated to 32 bits regardless of the
		 * address size.
		 */
		*ret = (s.base + off) & 0xffffffff;

		/* Protected mode: apply checks for segment validity in the
		 * following order:
		 * - segment type check (#GP(0) may be thrown)
		 * - usability check (#GP(0)/#SS(0))
		 * - limit check (#GP(0)/#SS(0))
		 */
		if (wr)
			/* #GP(0) if the destination operand is located in a
			 * read-only data segment or any code segment.
			 */
			exn = ((s.type & 0xa) == 0 || (s.type & 8));
		else
			/* #GP(0) if the source operand is located in an
			 * execute-only code segment
			 */
			exn = ((s.type & 0xa) == 8);
		if (exn) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
		 */
		exn = (s.unusable != 0);

		/*
		 * Protected mode: #GP(0)/#SS(0) if the memory operand is
		 * outside the segment limit. All CPUs that support VMX ignore
		 * limit checks for flat segments, i.e. segments with base==0,
		 * limit==0xffffffff and of type expand-up data or code.
		 */
		if (!(s.base == 0 && s.limit == 0xffffffff &&
		      ((s.type & 8) || !(s.type & 4))))
			exn = exn || ((u64)off + len - 1 > s.limit);
	}
	if (exn) {
		kvm_queue_exception_e(vcpu,
				      seg_reg == VCPU_SREG_SS ?
						SS_VECTOR : GP_VECTOR,
				      0);
		return 1;
	}

	return 0;
}
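
/*
 * Decode example for the field extraction above (hypothetical encoding):
 * vmx_instruction_info == 0x418100 means scaling == 0 (bits 1:0), 64-bit
 * address size (bits 9:7 == 2), a memory operand (bit 10 clear), the DS
 * segment (bits 17:15 == 3), no index register (bit 22 set) and RAX as the
 * base (bits 26:23 == 0, bit 27 clear), i.e. an operand of the form
 * ds:[rax + displacement].
 */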
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx;

	if (!nested_vmx_allowed(vcpu))
		return;

	vmx = to_vmx(vcpu);
	if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
		vmx->nested.msrs.entry_ctls_high |=
				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		vmx->nested.msrs.exit_ctls_high |=
				VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
	} else {
		vmx->nested.msrs.entry_ctls_high &=
				~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		vmx->nested.msrs.exit_ctls_high &=
				~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
	}
}
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
				int *ret)
{
	gva_t gva;
	struct x86_exception e;
	int r;

	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
				vmcs_read32(VMX_INSTRUCTION_INFO), false,
				sizeof(*vmpointer), &gva)) {
		*ret = 1;
		return -EINVAL;
	}

	r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
	if (r != X86EMUL_CONTINUE) {
		*ret = kvm_handle_memory_failure(vcpu, r, &e);
		return -EINVAL;
	}

	return 0;
}
/*
 * Allocate a shadow VMCS and associate it with the currently loaded
 * VMCS, unless such a shadow VMCS already exists. The newly allocated
 * VMCS is also VMCLEARed, so that it is ready for use.
 */
static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;

	/*
	 * We should allocate a shadow vmcs for vmcs01 only when L1
	 * executes VMXON and free it when L1 executes VMXOFF.
	 * As it is invalid to execute VMXON twice, we shouldn't reach
	 * here when vmcs01 already has an allocated shadow vmcs.
	 */
	WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);

	if (!loaded_vmcs->shadow_vmcs) {
		loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
		if (loaded_vmcs->shadow_vmcs)
			vmcs_clear(loaded_vmcs->shadow_vmcs);
	}
	return loaded_vmcs->shadow_vmcs;
}
static int enter_vmx_operation(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int r;

	r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
	if (r < 0)
		goto out_vmcs02;

	vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
	if (!vmx->nested.cached_vmcs12)
		goto out_cached_vmcs12;

	vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
	if (!vmx->nested.cached_shadow_vmcs12)
		goto out_cached_shadow_vmcs12;

	if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
		goto out_shadow_vmcs;

	hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;

	vmx->nested.vpid02 = allocate_vpid();

	vmx->nested.vmcs02_initialized = false;
	vmx->nested.vmxon = true;

	if (vmx_pt_mode_is_host_guest()) {
		vmx->pt_desc.guest.ctl = 0;
		pt_update_intercept_for_msr(vcpu);
	}

	return 0;

out_shadow_vmcs:
	kfree(vmx->nested.cached_shadow_vmcs12);

out_cached_shadow_vmcs12:
	kfree(vmx->nested.cached_vmcs12);

out_cached_vmcs12:
	free_loaded_vmcs(&vmx->nested.vmcs02);

out_vmcs02:
	return -ENOMEM;
}
/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not save or even
 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
 * do not currently need to store anything in that guest-allocated memory
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
 * argument is different from the VMXON pointer (which the spec says they do).
 */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
	int ret;
	gpa_t vmptr;
	uint32_t revision;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
		| FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;

	/*
	 * The Intel VMX Instruction Reference lists a bunch of bits that are
	 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
	 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this).
	 * Otherwise, we should fail with #UD. But most faulting conditions
	 * have already been checked by hardware, prior to the VM-exit for
	 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
	 * that bit set to 1 in non-root mode.
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* CPL=0 must be checked manually. */
	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (vmx->nested.vmxon)
		return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);

	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
			!= VMXON_NEEDED_FEATURES) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
		return ret;

	/*
	 * The first 4 bytes of VMXON region contain the supported
	 * VMCS revision identifier
	 *
	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
	 * which replaces physical address width with 32
	 */
	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_failInvalid(vcpu);

	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
	    revision != VMCS12_REVISION)
		return nested_vmx_failInvalid(vcpu);

	vmx->nested.vmxon_ptr = vmptr;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	return nested_vmx_succeed(vcpu);
}
static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->nested.current_vmptr == -1ull)
		return;

	copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));

	if (enable_shadow_vmcs) {
		/* copy to memory all shadowed fields in case
		   they were modified */
		copy_shadow_to_vmcs12(vmx);
		vmx_disable_shadow_vmcs(vmx);
	}
	vmx->nested.posted_intr_nv = -1;

	/* Flush VMCS12 to guest memory */
	kvm_vcpu_write_guest_page(vcpu,
				  vmx->nested.current_vmptr >> PAGE_SHIFT,
				  vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	vmx->nested.current_vmptr = -1ull;
}
/* Emulate the VMXOFF instruction */
static int handle_vmoff(struct kvm_vcpu *vcpu)
{
	if (!nested_vmx_check_permission(vcpu))
		return 1;

	free_nested(vcpu);

	/* Process a latched INIT during time CPU was in VMX operation */
	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return nested_vmx_succeed(vcpu);
}
/* Emulate the VMCLEAR instruction */
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 zero = 0;
	gpa_t vmptr;
	u64 evmcs_gpa;
	int r;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
		return r;

	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);

	if (vmptr == vmx->nested.vmxon_ptr)
		return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);

	/*
	 * When Enlightened VMEntry is enabled on the calling CPU we treat
	 * the memory area pointed to by vmptr as Enlightened VMCS (as there's
	 * no good way to distinguish it from VMCS12) and we must not corrupt
	 * it by writing to the non-existent 'launch_state' field. The area
	 * doesn't have to be the currently active EVMCS on the calling CPU
	 * and there's nothing KVM has to do to transition it from 'active'
	 * to 'non-active' state. It is possible that the area will stay
	 * mapped as vmx->nested.hv_evmcs but this shouldn't be a problem.
	 */
	if (likely(!vmx->nested.enlightened_vmcs_enabled ||
		   !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
		if (vmptr == vmx->nested.current_vmptr)
			nested_release_vmcs12(vcpu);

		kvm_vcpu_write_guest(vcpu,
				     vmptr + offsetof(struct vmcs12,
						      launch_state),
				     &zero, sizeof(zero));
	}

	return nested_vmx_succeed(vcpu);
}
/* Emulate the VMLAUNCH instruction */
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, true);
}

/* Emulate the VMRESUME instruction */
static int handle_vmresume(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, false);
}
static int handle_vmread(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
						    : get_vmcs12(vcpu);
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct x86_exception e;
	unsigned long field;
	u64 value;
	gva_t gva = 0;
	short offset;
	int len, r;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	/*
	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
	 * any VMREAD sets the ALU flags for VMfailInvalid.
	 */
	if (vmx->nested.current_vmptr == -1ull ||
	    (is_guest_mode(vcpu) &&
	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
		return nested_vmx_failInvalid(vcpu);

	/* Decode instruction info and find the field to read */
	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));

	offset = vmcs_field_to_offset(field);
	if (offset < 0)
		return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);

	if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	/* Read the field, zero-extended to a u64 value */
	value = vmcs12_read_any(vmcs12, field, offset);

	/*
	 * Now copy part of this value to register or memory, as requested.
	 * Note that the number of bits actually copied is 32 or 64 depending
	 * on the guest's mode (32 or 64 bit), not on the given field's length.
	 */
	if (instr_info & BIT(10)) {
		kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value);
	} else {
		len = is_64_bit_mode(vcpu) ? 8 : 4;
		if (get_vmx_mem_address(vcpu, exit_qualification,
					instr_info, true, len, &gva))
			return 1;
		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
		r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
		if (r != X86EMUL_CONTINUE)
			return kvm_handle_memory_failure(vcpu, r, &e);
	}

	return nested_vmx_succeed(vcpu);
}
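
/*
 * Decode note for the handler above: bits 31:28 of instr_info name the
 * register holding the VMCS field encoding, bit 10 distinguishes a register
 * destination from a memory one, and bits 6:3 name the destination register
 * in the register case. E.g. (hypothetically) "vmread %rax, %rcx" arrives
 * with bits 31:28 == 0 (field in RAX), bit 10 set and bits 6:3 == 1
 * (result written to RCX).
 */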
static bool is_shadow_field_rw(unsigned long field)
{
	switch (field) {
#define SHADOW_FIELD_RW(x, y) case x:
#include "vmcs_shadow_fields.h"
		return true;
	default:
		break;
	}
	return false;
}

static bool is_shadow_field_ro(unsigned long field)
{
	switch (field) {
#define SHADOW_FIELD_RO(x, y) case x:
#include "vmcs_shadow_fields.h"
		return true;
	default:
		break;
	}
	return false;
}
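
/*
 * The two helpers above use an X-macro trick: with SHADOW_FIELD_RW/RO
 * redefined to emit "case x:", re-including vmcs_shadow_fields.h expands the
 * shadow-field tables into case labels, so the switch statements stay in
 * lockstep with the tables by construction. For example, an RW table entry
 * for GUEST_RIP simply becomes "case GUEST_RIP:".
 */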
static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
						    : get_vmcs12(vcpu);
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct x86_exception e;
	unsigned long field;
	short offset;
	gva_t gva;
	int len, r;

	/*
	 * The value to write might be 32 or 64 bits, depending on L1's long
	 * mode, and eventually we need to write that into a field of several
	 * possible lengths. The code below first zero-extends the value to 64
	 * bit (value), and then copies only the appropriate number of
	 * bits into the vmcs12 field.
	 */
	u64 value = 0;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	/*
	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
	 * any VMWRITE sets the ALU flags for VMfailInvalid.
	 */
	if (vmx->nested.current_vmptr == -1ull ||
	    (is_guest_mode(vcpu) &&
	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
		return nested_vmx_failInvalid(vcpu);

	if (instr_info & BIT(10))
		value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf));
	else {
		len = is_64_bit_mode(vcpu) ? 8 : 4;
		if (get_vmx_mem_address(vcpu, exit_qualification,
					instr_info, false, len, &gva))
			return 1;
		r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
		if (r != X86EMUL_CONTINUE)
			return kvm_handle_memory_failure(vcpu, r, &e);
	}

	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));

	offset = vmcs_field_to_offset(field);
	if (offset < 0)
		return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);

	/*
	 * If the vCPU supports "VMWRITE to any supported field in the
	 * VMCS," then the "read-only" fields are actually read/write.
	 */
	if (vmcs_field_readonly(field) &&
	    !nested_cpu_has_vmwrite_any_field(vcpu))
		return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);

	/*
	 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
	 * vmcs12, else we may crush a field or consume a stale value.
	 */
	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	/*
	 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
	 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
	 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
	 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
	 * from L1 will return a different value than VMREAD from L2 (L1 sees
	 * the stripped down value, L2 sees the full value as stored by KVM).
	 */
	if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
		value &= 0x1f0ff;

	vmcs12_write_any(vmcs12, field, offset, value);

	/*
	 * Do not track vmcs12 dirty-state if in guest-mode as we actually
	 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
	 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
	 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
	 */
	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
		/*
		 * L1 can read these fields without exiting, ensure the
		 * shadow VMCS is up-to-date.
		 */
		if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
			preempt_disable();
			vmcs_load(vmx->vmcs01.shadow_vmcs);

			__vmcs_writel(field, value);

			vmcs_clear(vmx->vmcs01.shadow_vmcs);
			vmcs_load(vmx->loaded_vmcs->vmcs);
			preempt_enable();
		}
		vmx->nested.dirty_vmcs12 = true;
	}

	return nested_vmx_succeed(vcpu);
}
static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	vmx->nested.current_vmptr = vmptr;
	if (enable_shadow_vmcs) {
		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
		vmcs_write64(VMCS_LINK_POINTER,
			     __pa(vmx->vmcs01.shadow_vmcs));
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	}
	vmx->nested.dirty_vmcs12 = true;
}
/* Emulate the VMPTRLD instruction */
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t vmptr;
	int r;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
		return r;

	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);

	if (vmptr == vmx->nested.vmxon_ptr)
		return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);

	/* Forbid normal VMPTRLD if Enlightened version was used */
	if (vmx->nested.hv_evmcs)
		return 1;

	if (vmx->nested.current_vmptr != vmptr) {
		struct kvm_host_map map;
		struct vmcs12 *new_vmcs12;

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
			/*
			 * Reads from an unbacked page return all 1s,
			 * which means that the 32 bits located at the
			 * given physical address won't match the required
			 * VMCS12_REVISION identifier.
			 */
			return nested_vmx_fail(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
		}

		new_vmcs12 = map.hva;

		if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    (new_vmcs12->hdr.shadow_vmcs &&
		     !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
			kvm_vcpu_unmap(vcpu, &map, false);
			return nested_vmx_fail(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
		}

		nested_release_vmcs12(vcpu);

		/*
		 * Load VMCS12 from guest memory since it is not already
		 * cached.
		 */
		memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
		kvm_vcpu_unmap(vcpu, &map, false);

		set_current_vmptr(vmx, vmptr);
	}

	return nested_vmx_succeed(vcpu);
}
/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qual = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
	struct x86_exception e;
	gva_t gva;
	int r;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
		return 1;

	if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
				true, sizeof(gpa_t), &gva))
		return 1;
	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
	r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
					sizeof(gpa_t), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	return nested_vmx_succeed(vcpu);
}
#define EPTP_PA_MASK   GENMASK_ULL(51, 12)

static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{
	return VALID_PAGE(root_hpa) &&
	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
}
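
/*
 * EPTP_PA_MASK keeps only bits 51:12, the physical address of the EPT root
 * table. The masked-off low bits carry attributes (memory type in bits 2:0,
 * page-walk length in bits 5:3, the AD-enable bit 6), so two EPTPs naming
 * the same root compare equal here even if, say, only the AD-enable bit
 * differs.
 */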
/* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info, types;
	unsigned long type, roots_to_free;
	struct kvm_mmu *mmu;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 eptp, gpa;
	} operand;
	int i, r;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_EPT) ||
	    !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;

	if (type >= 32 || !(types & (1 << type)))
		return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	/* According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
				vmx_instruction_info, false, sizeof(operand), &gva))
		return 1;
	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	/*
	 * Nested EPT roots are always held through guest_mmu,
	 * not root_mmu.
	 */
	mmu = &vcpu->arch.guest_mmu;

	switch (type) {
	case VMX_EPT_EXTENT_CONTEXT:
		if (!nested_vmx_check_eptp(vcpu, operand.eptp))
			return nested_vmx_fail(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

		roots_to_free = 0;
		if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd,
					    operand.eptp))
			roots_to_free |= KVM_MMU_ROOT_CURRENT;

		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
			if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
						    mmu->prev_roots[i].pgd,
						    operand.eptp))
				roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
		}
		break;
	case VMX_EPT_EXTENT_GLOBAL:
		roots_to_free = KVM_MMU_ROOTS_ALL;
		break;
	default:
		BUG();
		break;
	}

	if (roots_to_free)
		kvm_mmu_free_roots(vcpu, mmu, roots_to_free);

	return nested_vmx_succeed(vcpu);
}
/* Emulate the INVVPID instruction */
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info;
	unsigned long type, types;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 vpid;
		u64 gla;
	} operand;
	u16 vpid02;
	int r;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_VPID) ||
	    !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.vpid_caps &
		 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;

	if (type >= 32 || !(types & (1 << type)))
		return nested_vmx_fail(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	/*
	 * According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global).
	 */
	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
				vmx_instruction_info, false, sizeof(operand), &gva))
		return 1;
	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	if (operand.vpid >> 16)
		return nested_vmx_fail(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	vpid02 = nested_get_vpid02(vcpu);
	switch (type) {
	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
		if (!operand.vpid ||
		    is_noncanonical_address(operand.gla, vcpu))
			return nested_vmx_fail(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		vpid_sync_vcpu_addr(vpid02, operand.gla);
		break;
	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
		if (!operand.vpid)
			return nested_vmx_fail(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		vpid_sync_context(vpid02);
		break;
	case VMX_VPID_EXTENT_ALL_CONTEXT:
		vpid_sync_context(vpid02);
		break;
	default:
		WARN_ON_ONCE(1);
		return kvm_skip_emulated_instruction(vcpu);
	}

	/*
	 * Sync the shadow page tables if EPT is disabled: L1 is invalidating
	 * linear mappings for L2 (tagged with L2's VPID). Free all roots, as
	 * VPIDs are not tracked in the MMU role.
	 *
	 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
	 * an MMU when EPT is disabled.
	 *
	 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
	 */
	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);

	return nested_vmx_succeed(vcpu);
}
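
/*
 * Worked example (illustrative): VMX_VPID_EXTENT_SUPPORTED_MASK occupies
 * bits 8-11 of the VPID capability MSR, one bit per INVVPID type. The
 * ">> 8" lines the mask up with the type numbers 0-3, so advertising all
 * four extents yields types == 0xf and every architectural type passes
 * the "types & (1 << type)" check.
 */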
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	u32 index = kvm_rcx_read(vcpu);
	u64 new_eptp;
	bool accessed_dirty;
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (!nested_cpu_has_eptp_switching(vmcs12) ||
	    !nested_cpu_has_ept(vmcs12))
		return 1;

	if (index >= VMFUNC_EPTP_ENTRIES)
		return 1;

	if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
				     &new_eptp, index * 8, 8))
		return 1;

	accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);

	/*
	 * If the (L2) guest does a vmfunc to the currently
	 * active ept pointer, we don't have to do anything else.
	 */
	if (vmcs12->ept_pointer != new_eptp) {
		if (!nested_vmx_check_eptp(vcpu, new_eptp))
			return 1;

		kvm_mmu_unload(vcpu);
		mmu->ept_ad = accessed_dirty;
		mmu->mmu_role.base.ad_disabled = !accessed_dirty;
		vmcs12->ept_pointer = new_eptp;

		/*
		 * TODO: Check what's the correct approach in case
		 * mmu reload fails. Currently, we just let the next
		 * reload potentially fail.
		 */
		kvm_mmu_reload(vcpu);
	}

	return 0;
}
static int handle_vmfunc(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	u32 function = kvm_rax_read(vcpu);

	/*
	 * VMFUNC is only supported for nested guests, but we always enable the
	 * secondary control for simplicity; for non-nested mode, fake that we
	 * didn't by injecting #UD.
	 */
	if (!is_guest_mode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcs12 = get_vmcs12(vcpu);
	if ((vmcs12->vm_function_control & (1 << function)) == 0)
		goto fail;

	switch (function) {
	case 0:
		if (nested_vmx_eptp_switching(vcpu, vmcs12))
			goto fail;
		break;
	default:
		goto fail;
	}

	return kvm_skip_emulated_instruction(vcpu);

fail:
	nested_vmx_vmexit(vcpu, vmx->exit_reason,
			  vmx_get_intr_info(vcpu),
			  vmx_get_exit_qual(vcpu));
	return 1;
}
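
/*
 * Worked example (illustrative): the guest selects the VM function in RAX
 * and, for EPTP switching (function 0), the list index in RCX. L1 permits
 * a function by setting the corresponding bit in vm_function_control, so
 * vm_function_control == 0x1 allows exactly EPTP switching, and any other
 * RAX value takes the "fail" path, i.e. a synthesized VM-exit to L1.
 */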
/*
 * Return true if an IO instruction with the specified port and size should cause
 * a VM-exit into L1.
 */
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	gpa_t bitmap, last_bitmap;
	u8 b;

	last_bitmap = (gpa_t)-1;
	b = -1;

	while (size > 0) {
		if (port < 0x8000)
			bitmap = vmcs12->io_bitmap_a;
		else if (port < 0x10000)
			bitmap = vmcs12->io_bitmap_b;
		else
			return true;
		bitmap += (port & 0x7fff) / 8;

		if (last_bitmap != bitmap)
			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
				return true;
		if (b & (1 << (port & 7)))
			return true;

		port++;
		size--;
		last_bitmap = bitmap;
	}

	return false;
}

static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification;
	unsigned short port;
	int size;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

	exit_qualification = vmx_get_exit_qual(vcpu);

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;

	return nested_vmx_check_io_bitmaps(vcpu, port, size);
}
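
/*
 * Worked example (illustrative): a 1-byte access to port 0x3f8 (COM1)
 * falls in io_bitmap_a (ports 0x0000-0x7fff) and consults byte
 * 0x3f8 / 8 = 127, bit 0x3f8 & 7 = 0. A 2-byte access straddling
 * 0x7fff/0x8000 walks the loop twice and checks both bitmaps, exiting
 * to L1 if either bit is set.
 */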
/*
 * Return true if we should exit from L2 to L1 to handle an MSR access,
 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
 * disinterest in the current event (read or write a specific MSR) by using an
 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
 */
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12, u32 exit_reason)
{
	u32 msr_index = kvm_rcx_read(vcpu);
	gpa_t bitmap;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return true;

	/*
	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
	 * for the four combinations of read/write and low/high MSR numbers.
	 * First we need to figure out which of the four to use:
	 */
	bitmap = vmcs12->msr_bitmap;
	if (exit_reason == EXIT_REASON_MSR_WRITE)
		bitmap += 2048;
	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
		bitmap += 1024;
	}

	/* Then read the msr_index'th bit from this bitmap: */
	if (msr_index < 1024*8) {
		unsigned char b;
		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
			return true;
		return 1 & (b >> (msr_index & 7));
	} else
		return true; /* let L1 handle the wrong parameter */
}
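
/*
 * Worked example (illustrative): a WRMSR to MSR_EFER (0xc0000080) first
 * adds 2048 to select the write bitmaps, then, because the index is in
 * the high range, subtracts 0xc0000000 (leaving 0x80) and adds 1024
 * more. The bit consulted is therefore byte 3072 + 0x80/8 = 3088, bit 0,
 * of the vmcs12 MSR bitmap page.
 */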
/*
 * Return true if we should exit from L2 to L1 to handle a CR access exit,
 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
 * intercept (via guest_host_mask etc.) the current event.
 */
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	int cr = exit_qualification & 15;
	int reg;
	unsigned long val;

	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		reg = (exit_qualification >> 8) & 15;
		val = kvm_register_readl(vcpu, reg);
		switch (cr) {
		case 0:
			if (vmcs12->cr0_guest_host_mask &
			    (val ^ vmcs12->cr0_read_shadow))
				return true;
			break;
		case 3:
			if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
				return true;
			break;
		case 4:
			if (vmcs12->cr4_guest_host_mask &
			    (vmcs12->cr4_read_shadow ^ val))
				return true;
			break;
		case 8:
			if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
				return true;
			break;
		}
		break;
	case 2: /* clts */
		if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
		    (vmcs12->cr0_read_shadow & X86_CR0_TS))
			return true;
		break;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR3_STORE_EXITING)
				return true;
			break;
		case 8:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR8_STORE_EXITING)
				return true;
			break;
		}
		break;
	case 3: /* lmsw */
		/*
		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
		 * cr0. Other attempted changes are ignored, with no exit.
		 */
		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		if (vmcs12->cr0_guest_host_mask & 0xe &
		    (val ^ vmcs12->cr0_read_shadow))
			return true;
		if ((vmcs12->cr0_guest_host_mask & 0x1) &&
		    !(vmcs12->cr0_read_shadow & 0x1) &&
		    (val & 0x1))
			return true;
		break;
	}
	return false;
}
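
/*
 * Worked example (illustrative): LMSW writes only CR0[3:0]. If L1 set
 * cr0_guest_host_mask = X86_CR0_MP (bit 1) with cr0_read_shadow.MP = 0,
 * then "lmsw $0x2" in L2 flips a guarded bit (0xe & (0x2 ^ 0x0) != 0)
 * and is reflected to L1, while "lmsw $0x0" changes nothing L1 guards
 * and is handled in L0. Setting PE (bit 0) is special-cased because LMSW
 * can set but never clear it.
 */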
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12, gpa_t bitmap)
{
	u32 vmx_instruction_info;
	unsigned long field;
	u8 b;

	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return true;

	/* Decode instruction info and find the field to access */
	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));

	/* Out-of-range fields always cause a VM exit from L2 to L1 */
	if (field >> 15)
		return true;

	if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
		return true;

	return 1 & (b >> (field & 7));
}
static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
{
	u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;

	if (nested_cpu_has_mtf(vmcs12))
		return true;

	/*
	 * An MTF VM-exit may be injected into the guest by setting the
	 * interruption-type to 7 (other event) and the vector field to 0. Such
	 * is the case regardless of the 'monitor trap flag' VM-execution
	 * control.
	 */
	return entry_intr_info == (INTR_INFO_VALID_MASK
				   | INTR_TYPE_OTHER_EVENT);
}
/*
 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
 * L1 wants the exit. Only call this when in is_guest_mode (L2).
 */
static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	u32 intr_info;

	switch ((u16)exit_reason) {
	case EXIT_REASON_EXCEPTION_NMI:
		intr_info = vmx_get_intr_info(vcpu);
		if (is_nmi(intr_info))
			return true;
		else if (is_page_fault(intr_info))
			return vcpu->arch.apf.host_apf_flags || !enable_ept;
		else if (is_debug(intr_info) &&
			 vcpu->guest_debug &
			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			return true;
		else if (is_breakpoint(intr_info) &&
			 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return true;
		return false;
	case EXIT_REASON_EXTERNAL_INTERRUPT:
		return true;
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return true;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault().
		 */
		return true;
	case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never uses L1's EPT directly, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table are L0's fault.
		 */
		return true;
	case EXIT_REASON_PREEMPTION_TIMER:
		return true;
	case EXIT_REASON_PML_FULL:
		/* We emulate PML support to L1. */
		return true;
	case EXIT_REASON_VMFUNC:
		/* VM functions are emulated through L2->L0 vmexits. */
		return true;
	case EXIT_REASON_ENCLS:
		/* SGX is never exposed to L1. */
		return true;
	default:
		return false;
	}
}
/*
 * Return true if L1 wants to intercept an exit from L2. Only call this when in
 * is_guest_mode (L2).
 */
static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 intr_info;

	switch ((u16)exit_reason) {
	case EXIT_REASON_EXCEPTION_NMI:
		intr_info = vmx_get_intr_info(vcpu);
		if (is_nmi(intr_info))
			return false;
		else if (is_page_fault(intr_info))
			return true;
		return vmcs12->exception_bitmap &
				(1u << (intr_info & INTR_INFO_VECTOR_MASK));
	case EXIT_REASON_EXTERNAL_INTERRUPT:
		return nested_exit_on_intr(vcpu);
	case EXIT_REASON_TRIPLE_FAULT:
		return true;
	case EXIT_REASON_INTERRUPT_WINDOW:
		return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
	case EXIT_REASON_NMI_WINDOW:
		return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
	case EXIT_REASON_TASK_SWITCH:
		return true;
	case EXIT_REASON_CPUID:
		return true;
	case EXIT_REASON_HLT:
		return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
	case EXIT_REASON_INVD:
		return true;
	case EXIT_REASON_INVLPG:
		return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_RDPMC:
		return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
	case EXIT_REASON_RDRAND:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
	case EXIT_REASON_RDSEED:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
	case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
		return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
	case EXIT_REASON_VMREAD:
		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
			vmcs12->vmread_bitmap);
	case EXIT_REASON_VMWRITE:
		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
			vmcs12->vmwrite_bitmap);
	case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
	case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
		/*
		 * VMX instructions trap unconditionally. This allows L1 to
		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
		 */
		return true;
	case EXIT_REASON_CR_ACCESS:
		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
	case EXIT_REASON_DR_ACCESS:
		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
	case EXIT_REASON_IO_INSTRUCTION:
		return nested_vmx_exit_handled_io(vcpu, vmcs12);
	case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
	case EXIT_REASON_INVALID_STATE:
		return true;
	case EXIT_REASON_MWAIT_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
	case EXIT_REASON_MONITOR_TRAP_FLAG:
		return nested_vmx_exit_handled_mtf(vmcs12);
	case EXIT_REASON_MONITOR_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
	case EXIT_REASON_PAUSE_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
			nested_cpu_has2(vmcs12,
				SECONDARY_EXEC_PAUSE_LOOP_EXITING);
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return true;
	case EXIT_REASON_TPR_BELOW_THRESHOLD:
		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
	case EXIT_REASON_APIC_ACCESS:
	case EXIT_REASON_APIC_WRITE:
	case EXIT_REASON_EOI_INDUCED:
		/*
		 * The controls for "virtualize APIC accesses," "APIC-
		 * register virtualization," and "virtual-interrupt
		 * delivery" only come from vmcs12.
		 */
		return true;
	case EXIT_REASON_INVPCID:
		return
			nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
			nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_WBINVD:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
	case EXIT_REASON_XSETBV:
		return true;
	case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
		/*
		 * This should never happen, since it is not possible to
		 * set XSS to a non-zero value - neither in L1 nor in L2.
		 * If it were, XSS would have to be checked against
		 * the XSS exit bitmap in vmcs12.
		 */
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
	case EXIT_REASON_UMWAIT:
	case EXIT_REASON_TPAUSE:
		return nested_cpu_has2(vmcs12,
			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
	default:
		return true;
	}
}
/*
 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
 * reflected into L1.
 */
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason = vmx->exit_reason;
	unsigned long exit_qual;
	u32 exit_intr_info;

	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	/*
	 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
	 * has already loaded L2's state.
	 */
	if (unlikely(vmx->fail)) {
		trace_kvm_nested_vmenter_failed(
			"hardware VM-instruction error: ",
			vmcs_read32(VM_INSTRUCTION_ERROR));
		exit_intr_info = 0;
		exit_qual = 0;
		goto reflect_vmexit;
	}

	trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX);

	/* If L0 (KVM) wants the exit, it trumps L1's desires. */
	if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
		return false;

	/* If L1 doesn't want the exit, handle it in L0. */
	if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
		return false;

	/*
	 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For
	 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
	 * need to be synthesized by querying the in-kernel LAPIC, but external
	 * interrupts are never reflected to L1 so it's a non-issue.
	 */
	exit_intr_info = vmx_get_intr_info(vcpu);
	if (is_exception_with_error_code(exit_intr_info)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	}
	exit_qual = vmx_get_exit_qual(vcpu);

reflect_vmexit:
	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual);
	return true;
}
static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_vmx *vmx;
	struct vmcs12 *vmcs12;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_VMX,
		.size = sizeof(kvm_state),
		.hdr.vmx.flags = 0,
		.hdr.vmx.vmxon_pa = -1ull,
		.hdr.vmx.vmcs12_pa = -1ull,
		.hdr.vmx.preemption_timer_deadline = 0,
	};
	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
		&user_kvm_nested_state->data.vmx[0];

	if (!vcpu)
		return kvm_state.size + sizeof(*user_vmx_nested_state);

	vmx = to_vmx(vcpu);
	vmcs12 = get_vmcs12(vcpu);

	if (nested_vmx_allowed(vcpu) &&
	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;

		if (vmx_has_valid_vmcs12(vcpu)) {
			kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);

			if (vmx->nested.hv_evmcs)
				kvm_state.flags |= KVM_STATE_NESTED_EVMCS;

			if (is_guest_mode(vcpu) &&
			    nested_cpu_has_shadow_vmcs(vmcs12) &&
			    vmcs12->vmcs_link_pointer != -1ull)
				kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
		}

		if (vmx->nested.smm.vmxon)
			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;

		if (vmx->nested.smm.guest_mode)
			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;

		if (is_guest_mode(vcpu)) {
			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

			if (vmx->nested.nested_run_pending)
				kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;

			if (vmx->nested.mtf_pending)
				kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;

			if (nested_cpu_has_preemption_timer(vmcs12) &&
			    vmx->nested.has_preemption_timer_deadline) {
				kvm_state.hdr.vmx.flags |=
					KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
				kvm_state.hdr.vmx.preemption_timer_deadline =
					vmx->nested.preemption_timer_deadline;
			}
		}
	}

	if (user_data_size < kvm_state.size)
		goto out;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!vmx_has_valid_vmcs12(vcpu))
		goto out;

	/*
	 * When running L2, the authoritative vmcs12 state is in the
	 * vmcs02. When running L1, the authoritative vmcs12 state is
	 * in the shadow or enlightened vmcs linked to vmcs01, unless
	 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
	 * vmcs12 state is in the vmcs12 already.
	 */
	if (is_guest_mode(vcpu)) {
		sync_vmcs02_to_vmcs12(vcpu, vmcs12);
		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
	} else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
		if (vmx->nested.hv_evmcs)
			copy_enlightened_to_vmcs12(vmx);
		else if (enable_shadow_vmcs)
			copy_shadow_to_vmcs12(vmx);
	}

	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
	BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);

	/*
	 * Copy over the full allocated size of vmcs12 rather than just the size
	 * of the struct.
	 */
	if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
		return -EFAULT;

	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
			return -EFAULT;
	}
out:
	return kvm_state.size;
}
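
/*
 * Worked example (illustrative): the blob handed to userspace is the
 * fixed header (sizeof(struct kvm_nested_state)) plus, when a vmcs12 is
 * resident, one VMCS12_SIZE (4KB) region, plus another 4KB shadow-vmcs12
 * region when L2 runs with a valid vmcs_link_pointer. The !vcpu path
 * above sizes the maximal blob without touching vCPU state, and a caller
 * whose buffer is too small gets the required size back and can retry.
 */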
/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu)) {
		to_vmx(vcpu)->nested.nested_run_pending = 0;
		nested_vmx_vmexit(vcpu, -1, 0, 0);
	}
	free_nested(vcpu);
}
static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	enum vm_entry_failure_code ignored;
	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
		&user_kvm_nested_state->data.vmx[0];
	int ret;

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
		return -EINVAL;

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
		if (kvm_state->hdr.vmx.smm.flags)
			return -EINVAL;

		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
			return -EINVAL;

		/*
		 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
		 * enable eVMCS capability on vCPU. However, since then
		 * code was changed such that flag signals vmcs12 should
		 * be copied into eVMCS in guest memory.
		 *
		 * To preserve backwards compatibility, allow user
		 * to set this flag even when there is no VMXON region.
		 */
		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
			return -EINVAL;
	} else {
		if (!nested_vmx_allowed(vcpu))
			return -EINVAL;

		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
			return -EINVAL;
	}

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (kvm_state->hdr.vmx.smm.flags &
	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
		return -EINVAL;

	/*
	 * SMM temporarily disables VMX, so we cannot be in guest mode,
	 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
	 * must be zero.
	 */
	if (is_smm(vcpu) ?
		(kvm_state->flags &
		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
		: kvm_state->hdr.vmx.smm.flags)
		return -EINVAL;

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
	    (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
		return -EINVAL;

	vmx_leave_nested(vcpu);

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
		return 0;

	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	/* Empty 'VMXON' state is permitted if no VMCS loaded */
	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
		/* See vmx_has_valid_vmcs12. */
		if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
		    (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
		    (kvm_state->hdr.vmx.vmcs12_pa != -1ull))
			return -EINVAL;
		else
			return 0;
	}

	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
			return -EINVAL;

		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		/*
		 * nested_vmx_handle_enlightened_vmptrld() cannot be called
		 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
		 * restored yet. EVMCS will be mapped from
		 * nested_get_vmcs12_pages().
		 */
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	} else {
		return 0;
	}

	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
		vmx->nested.smm.vmxon = true;
		vmx->nested.vmxon = false;

		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
			vmx->nested.smm.guest_mode = true;
	}

	vmcs12 = get_vmcs12(vcpu);
	if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
		return -EFAULT;

	if (vmcs12->hdr.revision_id != VMCS12_REVISION)
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return 0;

	vmx->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	vmx->nested.mtf_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);

	ret = -EINVAL;
	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);

		if (kvm_state->size <
		    sizeof(*kvm_state) +
		    sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
			goto error_guest_mode;

		if (copy_from_user(shadow_vmcs12,
				   user_vmx_nested_state->shadow_vmcs12,
				   sizeof(*shadow_vmcs12))) {
			ret = -EFAULT;
			goto error_guest_mode;
		}

		if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    !shadow_vmcs12->hdr.shadow_vmcs)
			goto error_guest_mode;
	}

	vmx->nested.has_preemption_timer_deadline = false;
	if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
		vmx->nested.has_preemption_timer_deadline = true;
		vmx->nested.preemption_timer_deadline =
			kvm_state->hdr.vmx.preemption_timer_deadline;
	}

	if (nested_vmx_check_controls(vcpu, vmcs12) ||
	    nested_vmx_check_host_state(vcpu, vmcs12) ||
	    nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
		goto error_guest_mode;

	vmx->nested.dirty_vmcs12 = true;
	ret = nested_vmx_enter_non_root_mode(vcpu, false);
	if (ret)
		goto error_guest_mode;

	return 0;

error_guest_mode:
	vmx->nested.nested_run_pending = 0;
	return ret;
}
void nested_vmx_set_vmcs_shadowing_bitmap(void)
{
	if (enable_shadow_vmcs) {
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}
}
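
/*
 * Illustrative note: these VMCS fields point the CPU at the bitmaps built
 * by init_vmcs_shadow_fields(). A clear bit lets hardware satisfy the
 * corresponding L1 VMREAD/VMWRITE directly from the shadow VMCS; a set
 * bit forces a VM-exit so KVM can emulate the access.
 */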
/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 */
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
{
	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits. The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01, and prepare_vmcs02, which bitwise-or's the control
	 * fields of vmcs01 and vmcs12, will keep it set in vmcs02 - and
	 * nested_vmx_l1_wants_exit() will not pass the related exits to L1.
	 * These rules have exceptions below.
	 */

	/* pin-based controls */
	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
	      msrs->pinbased_ctls_low,
	      msrs->pinbased_ctls_high);
	msrs->pinbased_ctls_low |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS |
		(enable_apicv ? PIN_BASED_POSTED_INTR : 0);
	msrs->pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;

	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
	      msrs->exit_ctls_low,
	      msrs->exit_ctls_high);
	msrs->exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
		VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
	msrs->exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	/* We support free control of debug control saving. */
	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;

	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
	      msrs->entry_ctls_low,
	      msrs->entry_ctls_high);
	msrs->entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
		VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
	msrs->entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);

	/* We support free control of debug control loading. */
	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;

	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
	      msrs->procbased_ctls_low,
	      msrs->procbased_ctls_high);
	msrs->procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->procbased_ctls_high &=
		CPU_BASED_INTR_WINDOW_EXITING |
		CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	msrs->procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	msrs->procbased_ctls_low &=
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);

	/*
	 * secondary cpu-based controls. Do not include those that
	 * depend on CPUID bits, they are added later by
	 * vmx_vcpu_after_set_cpuid.
	 */
	if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      msrs->secondary_ctls_low,
		      msrs->secondary_ctls_high);

	msrs->secondary_ctls_low = 0;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_ENABLE_RDTSCP |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_WBINVD_EXITING |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_RDRAND_EXITING |
		SECONDARY_EXEC_ENABLE_INVPCID |
		SECONDARY_EXEC_RDSEED_EXITING |
		SECONDARY_EXEC_XSAVES;

	/*
	 * We can emulate "VMCS shadowing," even if the hardware
	 * doesn't support it.
	 */
	msrs->secondary_ctls_high |=
		SECONDARY_EXEC_SHADOW_VMCS;

	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		msrs->ept_caps =
			VMX_EPT_PAGE_WALK_4_BIT |
			VMX_EPT_PAGE_WALK_5_BIT |
			VMX_EPTP_WB_BIT |
			VMX_EPT_INVEPT_BIT |
			VMX_EPT_EXECUTE_ONLY_BIT;

		msrs->ept_caps &= ept_caps;
		msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_1GB_PAGE_BIT;
		if (enable_ept_ad_bits) {
			msrs->secondary_ctls_high |=
				SECONDARY_EXEC_ENABLE_PML;
			msrs->ept_caps |= VMX_EPT_AD_BIT;
		}
	}

	if (cpu_has_vmx_vmfunc()) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VMFUNC;
		/*
		 * Advertise EPTP switching unconditionally
		 * since we emulate it.
		 */
		if (enable_ept)
			msrs->vmfunc_controls =
				VMX_VMFUNC_EPTP_SWITCHING;
	}

	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context. The alternative is
	 * not failing the single-context invvpid, and it is worse.
	 */
	if (enable_vpid) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VPID;
		msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SUPPORTED_MASK;
	}

	if (enable_unrestricted_guest)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	if (flexpriority_enabled)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;

	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
	      msrs->misc_low,
	      msrs->misc_high);
	msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
	msrs->misc_low |=
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT |
		VMX_MISC_ACTIVITY_WAIT_SIPI;
	msrs->misc_high = 0;

	/*
	 * This MSR reports some information about VMX support. We
	 * should return information about the VMX we emulate for the
	 * guest, and the VMCS structure we give it - not about the
	 * VMX support of the underlying hardware.
	 */
	msrs->basic =
		VMCS12_REVISION |
		VMX_BASIC_TRUE_CTLS |
		((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
		(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);

	if (cpu_has_vmx_basic_inout())
		msrs->basic |= VMX_BASIC_INOUT;

	/*
	 * These MSRs specify bits which the guest must keep fixed on
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
	msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;

	/* These MSRs specify bits which the guest must keep fixed off. */
	rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
	rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);

	/* highest index: VMX_PREEMPTION_TIMER_VALUE */
	msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
}
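
/*
 * Illustrative sketch (not part of KVM): the low/high convention above
 * can be checked with a helper of this shape, which is roughly what
 * vmx_control_verify() amounts to:
 *
 *	static inline bool ctl_ok(u32 control, u32 low, u32 high)
 *	{
 *		return ((control & high) | low) == control;
 *	}
 *
 * E.g. with low = 0x3 and high = 0xff: control = 0xf passes, control =
 * 0x1 fails (a must-be-1 bit is clear), and control = 0x103 fails
 * (bit 8 is not advertised in high).
 */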
void nested_vmx_hardware_unsetup(void)
{
	int i;

	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++)
			free_page((unsigned long)vmx_bitmap[i]);
	}
}
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{
	int i;

	if (!cpu_has_vmx_shadow_vmcs())
		enable_shadow_vmcs = 0;
	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++) {
			/*
			 * The vmx_bitmap is not tied to a VM and so should
			 * not be charged to a memcg.
			 */
			vmx_bitmap[i] = (unsigned long *)
				__get_free_page(GFP_KERNEL);
			if (!vmx_bitmap[i]) {
				nested_vmx_hardware_unsetup();
				return -ENOMEM;
			}
		}

		init_vmcs_shadow_fields();
	}

	exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
	exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
	exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
	exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
	exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
	exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
	exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
	exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
	exit_handlers[EXIT_REASON_VMON] = handle_vmon;
	exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
	exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
	exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;

	return 0;
}
struct kvm_x86_nested_ops vmx_nested_ops = {
	.check_events = vmx_check_nested_events,
	.hv_timer_pending = nested_vmx_preemption_timer_pending,
	.get_state = vmx_get_nested_state,
	.set_state = vmx_set_nested_state,
	.get_nested_state_pages = nested_get_vmcs12_pages,
	.write_log_dirty = nested_vmx_write_pml_buffer,
	.enable_evmcs = nested_enable_evmcs,
	.get_evmcs_version = nested_get_evmcs_version,
};