// SPDX-License-Identifier: GPL-2.0

#include <linux/objtool.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK          \
        (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
        VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
        VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
        VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
enum {
        VMX_VMREAD_BITMAP,
        VMX_VMWRITE_BITMAP,
        VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
struct shadow_vmcs_field {
        u16     encoding;
        u16     offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
        ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
        ARRAY_SIZE(shadow_read_write_fields);
static void init_vmcs_shadow_fields(void)
{
        int i, j;

        memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
        memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

        for (i = j = 0; i < max_shadow_read_only_fields; i++) {
                struct shadow_vmcs_field entry = shadow_read_only_fields[i];
                u16 field = entry.encoding;

                if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
                    (i + 1 == max_shadow_read_only_fields ||
                     shadow_read_only_fields[i + 1].encoding != field + 1))
                        pr_err("Missing field from shadow_read_only_field %x\n",
                               field + 1);

                clear_bit(field, vmx_vmread_bitmap);
                if (field & 1)
#ifdef CONFIG_X86_64
                        continue;
#else
                        entry.offset += sizeof(u32);
#endif
                shadow_read_only_fields[j++] = entry;
        }
        max_shadow_read_only_fields = j;
        for (i = j = 0; i < max_shadow_read_write_fields; i++) {
                struct shadow_vmcs_field entry = shadow_read_write_fields[i];
                u16 field = entry.encoding;

                if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
                    (i + 1 == max_shadow_read_write_fields ||
                     shadow_read_write_fields[i + 1].encoding != field + 1))
                        pr_err("Missing field from shadow_read_write_field %x\n",
                               field + 1);

                WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
                          field <= GUEST_TR_AR_BYTES,
                          "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

                /*
                 * PML and the preemption timer can be emulated, but the
                 * processor cannot vmwrite to fields that don't exist
                 * on bare metal.
                 */
                switch (field) {
                case GUEST_PML_INDEX:
                        if (!cpu_has_vmx_pml())
                                continue;
                        break;
                case VMX_PREEMPTION_TIMER_VALUE:
                        if (!cpu_has_vmx_preemption_timer())
                                continue;
                        break;
                case GUEST_INTR_STATUS:
                        if (!cpu_has_vmx_apicv())
                                continue;
                        break;
                default:
                        break;
                }

                clear_bit(field, vmx_vmwrite_bitmap);
                clear_bit(field, vmx_vmread_bitmap);
                if (field & 1)
#ifdef CONFIG_X86_64
                        continue;
#else
                        entry.offset += sizeof(u32);
#endif
                shadow_read_write_fields[j++] = entry;
        }
        max_shadow_read_write_fields = j;
}
/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
        vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
                & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
        return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
        vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
                        & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
                            X86_EFLAGS_SF | X86_EFLAGS_OF))
                        | X86_EFLAGS_CF);
        return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
                                u32 vm_instruction_error)
{
        vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
                        & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                            X86_EFLAGS_SF | X86_EFLAGS_OF))
                        | X86_EFLAGS_ZF);
        get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
        /*
         * We don't need to force sync to shadow VMCS because
         * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
         * fields and thus must be synced.
         */
        if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
                to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;

        return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        /*
         * failValid writes the error number to the current VMCS, which
         * can't be done if there isn't a current VMCS.
         */
        if (vmx->nested.current_vmptr == -1ull &&
            !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                return nested_vmx_failInvalid(vcpu);

        return nested_vmx_failValid(vcpu, vm_instruction_error);
}
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
        /* TODO: not to reset guest simply here. */
        kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
        pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
        return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
        return low | ((u64)high << 32);
}
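/*
 * Illustrative example (added commentary with made-up values):
 * vmx_control_msr(0x00000016, 0xfff9fffe) yields 0xfff9fffe00000016.  The
 * low 32 bits of a VMX capability MSR report the allowed-0 ("must-be-1")
 * control settings and the high 32 bits the allowed-1 settings.
 */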
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
        secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
        vmcs_write64(VMCS_LINK_POINTER, -1ull);
        vmx->nested.need_vmcs12_to_shadow_sync = false;
}
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
                kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
                vmx->nested.hv_evmcs = NULL;
        }

        vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
}
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
                                     struct loaded_vmcs *prev)
{
        struct vmcs_host_state *dest, *src;

        if (unlikely(!vmx->guest_state_loaded))
                return;

        src = &prev->host_state;
        dest = &vmx->loaded_vmcs->host_state;

        vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
        dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
        dest->ds_sel = src->ds_sel;
        dest->es_sel = src->es_sel;
#endif
}
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct loaded_vmcs *prev;
        int cpu;

        if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
                return;

        cpu = get_cpu();
        prev = vmx->loaded_vmcs;
        vmx->loaded_vmcs = vmcs;
        vmx_vcpu_load_vmcs(vcpu, cpu, prev);
        vmx_sync_vmcs_host_state(vmx, prev);
        put_cpu();

        vmx_register_cache_reset(vcpu);
}
/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
                vmx_switch_vmcs(vcpu, &vmx->vmcs01);

        if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

        vmx->nested.vmxon = false;
        vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = -1ull;
        if (enable_shadow_vmcs) {
                vmx_disable_shadow_vmcs(vmx);
                vmcs_clear(vmx->vmcs01.shadow_vmcs);
                free_vmcs(vmx->vmcs01.shadow_vmcs);
                vmx->vmcs01.shadow_vmcs = NULL;
        }
        kfree(vmx->nested.cached_vmcs12);
        vmx->nested.cached_vmcs12 = NULL;
        kfree(vmx->nested.cached_shadow_vmcs12);
        vmx->nested.cached_shadow_vmcs12 = NULL;
        /* Unpin physical memory we referred to in the vmcs02 */
        if (vmx->nested.apic_access_page) {
                kvm_release_page_clean(vmx->nested.apic_access_page);
                vmx->nested.apic_access_page = NULL;
        }
        kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
        kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
        vmx->nested.pi_desc = NULL;

        kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

        nested_release_evmcs(vcpu);

        free_loaded_vmcs(&vmx->nested.vmcs02);
}
/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        vmx_leave_nested(vcpu);
        vcpu_put(vcpu);
}
#define EPTP_PA_MASK   GENMASK_ULL(51, 12)

static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{
        return VALID_PAGE(root_hpa) &&
               ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
}
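/*
 * Illustrative note (added commentary): GENMASK_ULL(51, 12) keeps only the
 * EP4TA page-frame bits of an EPTP, so two EPTPs that differ only in the
 * attribute bits held in bits 11:0 (memory type, page-walk length,
 * accessed/dirty enable) still compare equal here.
 */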
static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
                                       gpa_t addr)
{
        uint i;
        struct kvm_mmu_root_info *cached_root;

        WARN_ON_ONCE(!mmu_is_nested(vcpu));

        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
                cached_root = &vcpu->arch.mmu->prev_roots[i];

                if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
                                            eptp))
                        vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
        }
}
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                                         struct x86_exception *fault)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 vm_exit_reason;
        unsigned long exit_qualification = vcpu->arch.exit_qualification;

        if (vmx->nested.pml_full) {
                vm_exit_reason = EXIT_REASON_PML_FULL;
                vmx->nested.pml_full = false;
                exit_qualification &= INTR_INFO_UNBLOCK_NMI;
        } else {
                if (fault->error_code & PFERR_RSVD_MASK)
                        vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
                else
                        vm_exit_reason = EXIT_REASON_EPT_VIOLATION;

                /*
                 * Although the caller (kvm_inject_emulated_page_fault) would
                 * have already synced the faulting address in the shadow EPT
                 * tables for the current EPTP12, we also need to sync it for
                 * any other cached EPTP02s based on the same EP4TA, since the
                 * TLB associates mappings to the EP4TA rather than the full EPTP.
                 */
                nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
                                           fault->address);
        }

        nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
        vmcs12->guest_physical_address = fault->address;
}
static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
{
        kvm_init_shadow_ept_mmu(vcpu,
                                to_vmx(vcpu)->nested.msrs.ept_caps &
                                VMX_EPT_EXECUTE_ONLY_BIT,
                                nested_ept_ad_enabled(vcpu),
                                nested_ept_get_eptp(vcpu));
}
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;
        nested_ept_new_eptp(vcpu);
        vcpu->arch.mmu->get_guest_pgd     = nested_ept_get_eptp;
        vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
        vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

        vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
                                            u16 error_code)
{
        bool inequality, bit;

        bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
        inequality =
                (error_code & vmcs12->page_fault_error_code_mask) !=
                 vmcs12->page_fault_error_code_match;
        return inequality ^ bit;
}
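/*
 * Illustrative note (added commentary): per the SDM, when the PF_VECTOR bit
 * is set in the exception bitmap a #PF causes a VM-exit only if the masked
 * error code equals the match value; when the bit is clear, only if it
 * differs.  Both cases reduce to "inequality XOR bit" above.
 */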
/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        unsigned int nr = vcpu->arch.exception.nr;
        bool has_payload = vcpu->arch.exception.has_payload;
        unsigned long payload = vcpu->arch.exception.payload;

        if (nr == PF_VECTOR) {
                if (vcpu->arch.exception.nested_apf) {
                        *exit_qual = vcpu->arch.apf.nested_apf_token;
                        return 1;
                }
                if (nested_vmx_is_page_fault_vmexit(vmcs12,
                                                    vcpu->arch.exception.error_code)) {
                        *exit_qual = has_payload ? payload : vcpu->arch.cr2;
                        return 1;
                }
        } else if (vmcs12->exception_bitmap & (1u << nr)) {
                if (nr == DB_VECTOR) {
                        if (!has_payload) {
                                payload = vcpu->arch.dr6;
                                payload &= ~DR6_BT;
                                payload ^= DR6_ACTIVE_LOW;
                        }
                        *exit_qual = payload;
                } else
                        *exit_qual = 0;
                return 1;
        }

        return 0;
}
static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
                                         struct x86_exception *fault)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

        WARN_ON(!is_guest_mode(vcpu));

        if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
            !to_vmx(vcpu)->nested.nested_run_pending) {
                vmcs12->vm_exit_intr_error_code = fault->error_code;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                                  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
                                  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
                                  fault->address);
        } else {
                kvm_inject_page_fault(vcpu, fault);
        }
}
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
                                               struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
                return 0;

        if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
            CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
                                                struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
                return 0;

        if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
                                                struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
                return 0;

        if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
                return -EINVAL;

        return 0;
}
/*
 * Check if MSR is intercepted for L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
        unsigned long *msr_bitmap;
        int f = sizeof(unsigned long);

        if (!cpu_has_vmx_msr_bitmap())
                return true;

        msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

        if (msr <= 0x1fff) {
                return !!test_bit(msr, msr_bitmap + 0x800 / f);
        } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
                msr &= 0x1fff;
                return !!test_bit(msr, msr_bitmap + 0xc00 / f);
        }

        return true;
}
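/*
 * Illustrative note (added commentary): the 4-KByte MSR bitmap is split
 * into four 1-KByte regions: read-low (MSRs 0x0-0x1fff) at byte 0x000,
 * read-high (0xc0000000-0xc0001fff) at 0x400, write-low at 0x800 and
 * write-high at 0xc00.  E.g. the write-intercept bit for MSR 0xc0000080 is
 * bit 0x80 of the region at 0xc00; "+ 0xc00 / f" advances an unsigned long
 * pointer by 0xc00 bytes.
 */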
/*
 * If a msr is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both of L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
                                                 unsigned long *msr_bitmap_nested,
                                                 u32 msr, int type)
{
        int f = sizeof(unsigned long);

        /*
         * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
         * have the write-low and read-high bitmap offsets the wrong way round.
         * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
         */
        if (msr <= 0x1fff) {
                if (type & MSR_TYPE_R &&
                   !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
                        /* read-low */
                        __clear_bit(msr, msr_bitmap_nested + 0x000 / f);

                if (type & MSR_TYPE_W &&
                   !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
                        /* write-low */
                        __clear_bit(msr, msr_bitmap_nested + 0x800 / f);

        } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
                msr &= 0x1fff;
                if (type & MSR_TYPE_R &&
                   !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
                        /* read-high */
                        __clear_bit(msr, msr_bitmap_nested + 0x400 / f);

                if (type & MSR_TYPE_W &&
                   !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
                        /* write-high */
                        __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);

        }
}
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
        int msr;

        for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
                unsigned word = msr / BITS_PER_LONG;

                msr_bitmap[word] = ~0;
                msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
        }
}
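/*
 * Illustrative note (added commentary): with BITS_PER_LONG == 64 the loop
 * above makes four iterations (256 x2APIC MSRs / 64 bits per word), and
 * each iteration writes ~0 to both the read-low word and, 0x800 bytes
 * later, the matching write-low word - the "eight 8-byte writes" mentioned
 * where this helper is used below.
 */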
/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12)
{
        int msr;
        unsigned long *msr_bitmap_l1;
        unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
        struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

        /* Nothing to do if the MSR bitmap is not in use. */
        if (!cpu_has_vmx_msr_bitmap() ||
            !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
                return false;

        if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
                return false;

        msr_bitmap_l1 = (unsigned long *)map->hva;

        /*
         * To keep the control flow simple, pay eight 8-byte writes (sixteen
         * 4-byte writes on 32-bit systems) up front to enable intercepts for
         * the x2APIC MSR range and selectively disable them below.
         */
        enable_x2apic_msr_intercepts(msr_bitmap_l0);

        if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
                if (nested_cpu_has_apic_reg_virt(vmcs12)) {
                        /*
                         * L0 need not intercept reads for MSRs between 0x800
                         * and 0x8ff, it just lets the processor take the value
                         * from the virtual-APIC page; take those 256 bits
                         * directly from the L1 bitmap.
                         */
                        for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
                                unsigned word = msr / BITS_PER_LONG;

                                msr_bitmap_l0[word] = msr_bitmap_l1[word];
                        }
                }

                nested_vmx_disable_intercept_for_msr(
                        msr_bitmap_l1, msr_bitmap_l0,
                        X2APIC_MSR(APIC_TASKPRI),
                        MSR_TYPE_R | MSR_TYPE_W);

                if (nested_cpu_has_vid(vmcs12)) {
                        nested_vmx_disable_intercept_for_msr(
                                msr_bitmap_l1, msr_bitmap_l0,
                                X2APIC_MSR(APIC_EOI),
                                MSR_TYPE_W);
                        nested_vmx_disable_intercept_for_msr(
                                msr_bitmap_l1, msr_bitmap_l0,
                                X2APIC_MSR(APIC_SELF_IPI),
                                MSR_TYPE_W);
                }
        }

        /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
#ifdef CONFIG_X86_64
        nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
                                             MSR_FS_BASE, MSR_TYPE_RW);

        nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
                                             MSR_GS_BASE, MSR_TYPE_RW);

        nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
                                             MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif

        /*
         * Checking the L0->L1 bitmap is trying to verify two things:
         *
         * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
         *    ensures that we do not accidentally generate an L02 MSR bitmap
         *    from the L12 MSR bitmap that is too permissive.
         * 2. That L1 or L2s have actually used the MSR. This avoids
         *    unnecessarily merging of the bitmap if the MSR is unused. This
         *    works properly because we only update the L01 MSR bitmap lazily.
         *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
         *    updated to reflect this when L1 (or its L2s) actually write to
         *    the MSR.
         */
        if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
                nested_vmx_disable_intercept_for_msr(
                                        msr_bitmap_l1, msr_bitmap_l0,
                                        MSR_IA32_SPEC_CTRL,
                                        MSR_TYPE_R | MSR_TYPE_W);

        if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
                nested_vmx_disable_intercept_for_msr(
                                        msr_bitmap_l1, msr_bitmap_l0,
                                        MSR_IA32_PRED_CMD,
                                        MSR_TYPE_W);

        kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

        return true;
}
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
                                       struct vmcs12 *vmcs12)
{
        struct kvm_host_map map;
        struct vmcs12 *shadow;

        if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
            vmcs12->vmcs_link_pointer == -1ull)
                return;

        shadow = get_shadow_vmcs12(vcpu);

        if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
                return;

        memcpy(shadow, map.hva, VMCS12_SIZE);
        kvm_vcpu_unmap(vcpu, &map, false);
}
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
                                              struct vmcs12 *vmcs12)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
            vmcs12->vmcs_link_pointer == -1ull)
                return;

        kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
                        get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}
/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
        return get_vmcs12(vcpu)->vm_exit_controls &
                VM_EXIT_ACK_INTR_ON_EXIT;
}
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12)
{
        if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
            CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
                return -EINVAL;
        else
                return 0;
}
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
                                           struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
            !nested_cpu_has_apic_reg_virt(vmcs12) &&
            !nested_cpu_has_vid(vmcs12) &&
            !nested_cpu_has_posted_intr(vmcs12))
                return 0;

        /*
         * If virtualize x2apic mode is enabled,
         * virtualize apic access must be disabled.
         */
        if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
               nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
                return -EINVAL;

        /*
         * If virtual interrupt delivery is enabled,
         * we must exit on external interrupts.
         */
        if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
                return -EINVAL;

        /*
         * bits 15:8 should be zero in posted_intr_nv,
         * the descriptor address has been already checked
         * in nested_get_vmcs12_pages.
         *
         * bits 5:0 of posted_intr_desc_addr should be zero.
         */
        if (nested_cpu_has_posted_intr(vmcs12) &&
           (CC(!nested_cpu_has_vid(vmcs12)) ||
            CC(!nested_exit_intr_ack_set(vcpu)) ||
            CC((vmcs12->posted_intr_nv & 0xff00)) ||
            CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
                return -EINVAL;

        /* tpr shadow is needed by all apicv features. */
        if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
                return -EINVAL;

        return 0;
}
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
                                       u32 count, u64 addr)
{
        if (count == 0)
                return 0;

        if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
            !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
                return -EINVAL;

        return 0;
}
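/*
 * Illustrative note (added commentary): each struct vmx_msr_entry is 16
 * bytes (index, reserved, value), hence the 16-byte alignment check on the
 * base and the legality check on the last byte, addr + count * 16 - 1.
 * A 512-entry list, for example, spans 8 KBytes.
 */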
static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
                                                     struct vmcs12 *vmcs12)
{
        if (CC(nested_vmx_check_msr_switch(vcpu,
                                           vmcs12->vm_exit_msr_load_count,
                                           vmcs12->vm_exit_msr_load_addr)) ||
            CC(nested_vmx_check_msr_switch(vcpu,
                                           vmcs12->vm_exit_msr_store_count,
                                           vmcs12->vm_exit_msr_store_addr)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
                                                      struct vmcs12 *vmcs12)
{
        if (CC(nested_vmx_check_msr_switch(vcpu,
                                           vmcs12->vm_entry_msr_load_count,
                                           vmcs12->vm_entry_msr_load_addr)))
                return -EINVAL;

        return 0;
}
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
                                         struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has_pml(vmcs12))
                return 0;

        if (CC(!nested_cpu_has_ept(vmcs12)) ||
            CC(!page_address_valid(vcpu, vmcs12->pml_address)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
                                                        struct vmcs12 *vmcs12)
{
        if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
               !nested_cpu_has_ept(vmcs12)))
                return -EINVAL;
        return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
                                                         struct vmcs12 *vmcs12)
{
        if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
               !nested_cpu_has_ept(vmcs12)))
                return -EINVAL;
        return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has_shadow_vmcs(vmcs12))
                return 0;

        if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
            CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
                return -EINVAL;

        return 0;
}
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
                                       struct vmx_msr_entry *e)
{
        /* x2APIC MSR accesses are not allowed */
        if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
                return -EINVAL;
        if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
            CC(e->index == MSR_IA32_UCODE_REV))
                return -EINVAL;
        if (CC(e->reserved != 0))
                return -EINVAL;
        return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
                                     struct vmx_msr_entry *e)
{
        if (CC(e->index == MSR_FS_BASE) ||
            CC(e->index == MSR_GS_BASE) ||
            CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
            nested_vmx_msr_check_common(vcpu, e))
                return -EINVAL;
        return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
                                      struct vmx_msr_entry *e)
{
        if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
            nested_vmx_msr_check_common(vcpu, e))
                return -EINVAL;
        return 0;
}
static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
                                       vmx->nested.msrs.misc_high);

        return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}
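/*
 * Illustrative note (added commentary): bits 27:25 of IA32_VMX_MISC encode
 * the recommended maximum MSR-list size as (N + 1) * 512, with
 * VMX_MISC_MSR_LIST_MULTIPLIER == 512; e.g. N == 0 allows 512 entries per
 * load/store list.
 */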
/*
 * Load guest's/host's msr at nested entry/exit.
 * return 0 for success, entry index for failure.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch
 * as possible, process all valid entries before failing rather than precheck
 * for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
        u32 i;
        struct vmx_msr_entry e;
        u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

        for (i = 0; i < count; i++) {
                if (unlikely(i >= max_msr_list_size))
                        goto fail;

                if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
                                        &e, sizeof(e))) {
                        pr_debug_ratelimited(
                                "%s cannot read MSR entry (%u, 0x%08llx)\n",
                                __func__, i, gpa + i * sizeof(e));
                        goto fail;
                }
                if (nested_vmx_load_msr_check(vcpu, &e)) {
                        pr_debug_ratelimited(
                                "%s check failed (%u, 0x%x, 0x%x)\n",
                                __func__, i, e.index, e.reserved);
                        goto fail;
                }
                if (kvm_set_msr(vcpu, e.index, e.value)) {
                        pr_debug_ratelimited(
                                "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
                                __func__, i, e.index, e.value);
                        goto fail;
                }
        }
        return 0;
fail:
        /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
        return i + 1;
}
static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
                                            u32 msr_index,
                                            u64 *data)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        /*
         * If the L0 hypervisor stored a more accurate value for the TSC that
         * does not include the time taken for emulation of the L2->L1
         * VM-exit in L0, use the more accurate value.
         */
        if (msr_index == MSR_IA32_TSC) {
                int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
                                                    MSR_IA32_TSC);

                if (i >= 0) {
                        u64 val = vmx->msr_autostore.guest.val[i].value;

                        *data = kvm_read_l1_tsc(vcpu, val);
                        return true;
                }
        }

        if (kvm_get_msr(vcpu, msr_index, data)) {
                pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
                        msr_index);
                return false;
        }
        return true;
}
static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
                                     struct vmx_msr_entry *e)
{
        if (kvm_vcpu_read_guest(vcpu,
                                gpa + i * sizeof(*e),
                                e, 2 * sizeof(u32))) {
                pr_debug_ratelimited(
                        "%s cannot read MSR entry (%u, 0x%08llx)\n",
                        __func__, i, gpa + i * sizeof(*e));
                return false;
        }
        if (nested_vmx_store_msr_check(vcpu, e)) {
                pr_debug_ratelimited(
                        "%s check failed (%u, 0x%x, 0x%x)\n",
                        __func__, i, e->index, e->reserved);
                return false;
        }
        return true;
}
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
        u64 data;
        u32 i;
        struct vmx_msr_entry e;
        u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

        for (i = 0; i < count; i++) {
                if (unlikely(i >= max_msr_list_size))
                        return -EINVAL;

                if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
                        return -EINVAL;

                if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
                        return -EINVAL;

                if (kvm_vcpu_write_guest(vcpu,
                                         gpa + i * sizeof(e) +
                                                 offsetof(struct vmx_msr_entry, value),
                                         &data, sizeof(data))) {
                        pr_debug_ratelimited(
                                "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
                                __func__, i, e.index, data);
                        return -EINVAL;
                }
        }
        return 0;
}
static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        u32 count = vmcs12->vm_exit_msr_store_count;
        u64 gpa = vmcs12->vm_exit_msr_store_addr;
        struct vmx_msr_entry e;
        u32 i;

        for (i = 0; i < count; i++) {
                if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
                        return false;

                if (e.index == msr_index)
                        return true;
        }
        return false;
}
static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
                                           u32 msr_index)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
        bool in_vmcs12_store_list;
        int msr_autostore_slot;
        bool in_autostore_list;
        int last;

        msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
        in_autostore_list = msr_autostore_slot >= 0;
        in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

        if (in_vmcs12_store_list && !in_autostore_list) {
                if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
                        /*
                         * Emulated VMEntry does not fail here. Instead a less
                         * accurate value will be returned by
                         * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
                         * instead of reading the value from the vmcs02 VMExit
                         * MSR-store area.
                         */
                        pr_warn_ratelimited(
                                "Not enough msr entries in msr_autostore. Can't add msr %x\n",
                                msr_index);
                        return;
                }
                last = autostore->nr++;
                autostore->val[last].index = msr_index;
        } else if (!in_vmcs12_store_list && in_autostore_list) {
                last = --autostore->nr;
                autostore->val[msr_autostore_slot] = autostore->val[last];
        }
}
/*
 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
 * @entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                               bool nested_ept, bool reload_pdptrs,
                               enum vm_entry_failure_code *entry_failure_code)
{
        if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
                *entry_failure_code = ENTRY_FAIL_DEFAULT;
                return -EINVAL;
        }

        /*
         * If PAE paging and EPT are both on, CR3 is not used by the CPU and
         * must not be dereferenced.
         */
        if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
            CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
                *entry_failure_code = ENTRY_FAIL_PDPTE;
                return -EINVAL;
        }

        if (!nested_ept)
                kvm_mmu_new_pgd(vcpu, cr3);

        vcpu->arch.cr3 = cr3;
        kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

        /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
        kvm_init_mmu(vcpu);

        return 0;
}
/*
 * Returns if KVM is able to config CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
 *
 * If L0 uses EPT, L1 and L2 run with different EPTP because
 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
 * are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

        return enable_ept ||
               (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}
static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
                                            struct vmcs12 *vmcs12,
                                            bool is_vmenter)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        /*
         * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
         * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
         * full TLB flush from the guest's perspective. This is required even
         * if VPID is disabled in the host as KVM may need to synchronize the
         * MMU in response to the guest TLB flush.
         *
         * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
         * EPT is a special snowflake, as guest-physical mappings aren't
         * flushed on VPID invalidations, including VM-Enter or VM-Exit with
         * VPID disabled. As a result, KVM _never_ needs to sync nEPT
         * entries on VM-Enter because L1 can't rely on VM-Enter to flush
         * those mappings.
         */
        if (!nested_cpu_has_vpid(vmcs12)) {
                kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
                return;
        }

        /* L2 should never have a VPID if VPID is disabled. */
        WARN_ON(!enable_vpid);

        /*
         * If VPID is enabled and used by vmc12, but L2 does not have a unique
         * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
         * a VPID for L2, flush the current context as the effective ASID is
         * common to both L1 and L2.
         *
         * Defer the flush so that it runs after vmcs02.EPTP has been set by
         * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
         * redundant flushes further down the nested pipeline.
         *
         * If a TLB flush isn't required due to any of the above, and vpid12 is
         * changing then the new "virtual" VPID (vpid12) will reuse the same
         * "real" VPID (vpid02), and so needs to be flushed. There's no direct
         * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
         * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
         * guest-physical mappings, so there is no need to sync the nEPT MMU.
         */
        if (!nested_has_guest_tlb_tag(vcpu)) {
                kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
        } else if (is_vmenter &&
                   vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
                vmx->nested.last_vpid = vmcs12->virtual_processor_id;
                vpid_sync_context(nested_get_vpid02(vcpu));
        }
}
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
        superset &= mask;
        subset &= mask;

        return (superset | subset) == superset;
}
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
        const u64 feature_and_reserved =
                /* feature (except bit 48; see below) */
                BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
                /* reserved */
                BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
        u64 vmx_basic = vmx->nested.msrs.basic;

        if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
                return -EINVAL;

        /*
         * KVM does not emulate a version of VMX that constrains physical
         * addresses of VMX structures (e.g. VMCS) to 32-bits.
         */
        if (data & BIT_ULL(48))
                return -EINVAL;

        if (vmx_basic_vmcs_revision_id(vmx_basic) !=
            vmx_basic_vmcs_revision_id(data))
                return -EINVAL;

        if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
                return -EINVAL;

        vmx->nested.msrs.basic = data;
        return 0;
}
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
        u64 supported;
        u32 *lowp, *highp;

        switch (msr_index) {
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
                lowp = &vmx->nested.msrs.pinbased_ctls_low;
                highp = &vmx->nested.msrs.pinbased_ctls_high;
                break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
                lowp = &vmx->nested.msrs.procbased_ctls_low;
                highp = &vmx->nested.msrs.procbased_ctls_high;
                break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
                lowp = &vmx->nested.msrs.exit_ctls_low;
                highp = &vmx->nested.msrs.exit_ctls_high;
                break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
                lowp = &vmx->nested.msrs.entry_ctls_low;
                highp = &vmx->nested.msrs.entry_ctls_high;
                break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:
                lowp = &vmx->nested.msrs.secondary_ctls_low;
                highp = &vmx->nested.msrs.secondary_ctls_high;
                break;
        default:
                BUG();
        }

        supported = vmx_control_msr(*lowp, *highp);

        /* Check must-be-1 bits are still 1. */
        if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
                return -EINVAL;

        /* Check must-be-0 bits are still 0. */
        if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
                return -EINVAL;

        *lowp = data;
        *highp = data >> 32;
        return 0;
}
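/*
 * Illustrative note (added commentary): in vmx_restore_control_msr() above,
 * the low word of "supported" holds the must-be-1 settings and the high
 * word the may-be-1 settings, so a restored value is rejected if it clears
 * a bit that must be 1 (first check) or sets a bit that was never allowed
 * to be 1 (second check).
 */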
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
        const u64 feature_and_reserved_bits =
                /* feature */
                BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
                BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
                /* reserved */
                GENMASK_ULL(13, 9) | BIT_ULL(31);
        u64 vmx_misc;

        vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
                                   vmx->nested.msrs.misc_high);

        if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
                return -EINVAL;

        if ((vmx->nested.msrs.pinbased_ctls_high &
             PIN_BASED_VMX_PREEMPTION_TIMER) &&
            vmx_misc_preemption_timer_rate(data) !=
            vmx_misc_preemption_timer_rate(vmx_misc))
                return -EINVAL;

        if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
                return -EINVAL;

        if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
                return -EINVAL;

        if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
                return -EINVAL;

        vmx->nested.msrs.misc_low = data;
        vmx->nested.msrs.misc_high = data >> 32;

        return 0;
}
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
        u64 vmx_ept_vpid_cap;

        vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
                                           vmx->nested.msrs.vpid_caps);

        /* Every bit is either reserved or a feature bit. */
        if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
                return -EINVAL;

        vmx->nested.msrs.ept_caps = data;
        vmx->nested.msrs.vpid_caps = data >> 32;
        return 0;
}
static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
        u64 *msr;

        switch (msr_index) {
        case MSR_IA32_VMX_CR0_FIXED0:
                msr = &vmx->nested.msrs.cr0_fixed0;
                break;
        case MSR_IA32_VMX_CR4_FIXED0:
                msr = &vmx->nested.msrs.cr4_fixed0;
                break;
        default:
                BUG();
        }

        /*
         * 1 bits (which indicates bits which "must-be-1" during VMX operation)
         * must be 1 in the restored value.
         */
        if (!is_bitwise_subset(data, *msr, -1ULL))
                return -EINVAL;

        *msr = data;
        return 0;
}
/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        /*
         * Don't allow changes to the VMX capability MSRs while the vCPU
         * is in VMX operation.
         */
        if (vmx->nested.vmxon)
                return -EBUSY;

        switch (msr_index) {
        case MSR_IA32_VMX_BASIC:
                return vmx_restore_vmx_basic(vmx, data);
        case MSR_IA32_VMX_PINBASED_CTLS:
        case MSR_IA32_VMX_PROCBASED_CTLS:
        case MSR_IA32_VMX_EXIT_CTLS:
        case MSR_IA32_VMX_ENTRY_CTLS:
                /*
                 * The "non-true" VMX capability MSRs are generated from the
                 * "true" MSRs, so we do not support restoring them directly.
                 *
                 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
                 * should restore the "true" MSRs with the must-be-1 bits
                 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
                 * DEFAULT SETTINGS".
                 */
                return -EINVAL;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
        case MSR_IA32_VMX_PROCBASED_CTLS2:
                return vmx_restore_control_msr(vmx, msr_index, data);
        case MSR_IA32_VMX_MISC:
                return vmx_restore_vmx_misc(vmx, data);
        case MSR_IA32_VMX_CR0_FIXED0:
        case MSR_IA32_VMX_CR4_FIXED0:
                return vmx_restore_fixed0_msr(vmx, msr_index, data);
        case MSR_IA32_VMX_CR0_FIXED1:
        case MSR_IA32_VMX_CR4_FIXED1:
                /*
                 * These MSRs are generated based on the vCPU's CPUID, so we
                 * do not support restoring them directly.
                 */
                return -EINVAL;
        case MSR_IA32_VMX_EPT_VPID_CAP:
                return vmx_restore_vmx_ept_vpid_cap(vmx, data);
        case MSR_IA32_VMX_VMCS_ENUM:
                vmx->nested.msrs.vmcs_enum = data;
                return 0;
        case MSR_IA32_VMX_VMFUNC:
                if (data & ~vmx->nested.msrs.vmfunc_controls)
                        return -EINVAL;
                vmx->nested.msrs.vmfunc_controls = data;
                return 0;
        default:
                /*
                 * The rest of the VMX capability MSRs do not support restore.
                 */
                return -EINVAL;
        }
}
/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
        switch (msr_index) {
        case MSR_IA32_VMX_BASIC:
                *pdata = msrs->basic;
                break;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
        case MSR_IA32_VMX_PINBASED_CTLS:
                *pdata = vmx_control_msr(
                        msrs->pinbased_ctls_low,
                        msrs->pinbased_ctls_high);
                if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
                        *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
                break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
        case MSR_IA32_VMX_PROCBASED_CTLS:
                *pdata = vmx_control_msr(
                        msrs->procbased_ctls_low,
                        msrs->procbased_ctls_high);
                if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
                        *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
                break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        case MSR_IA32_VMX_EXIT_CTLS:
                *pdata = vmx_control_msr(
                        msrs->exit_ctls_low,
                        msrs->exit_ctls_high);
                if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
                        *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
                break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
        case MSR_IA32_VMX_ENTRY_CTLS:
                *pdata = vmx_control_msr(
                        msrs->entry_ctls_low,
                        msrs->entry_ctls_high);
                if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
                        *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
                break;
        case MSR_IA32_VMX_MISC:
                *pdata = vmx_control_msr(
                        msrs->misc_low,
                        msrs->misc_high);
                break;
        case MSR_IA32_VMX_CR0_FIXED0:
                *pdata = msrs->cr0_fixed0;
                break;
        case MSR_IA32_VMX_CR0_FIXED1:
                *pdata = msrs->cr0_fixed1;
                break;
        case MSR_IA32_VMX_CR4_FIXED0:
                *pdata = msrs->cr4_fixed0;
                break;
        case MSR_IA32_VMX_CR4_FIXED1:
                *pdata = msrs->cr4_fixed1;
                break;
        case MSR_IA32_VMX_VMCS_ENUM:
                *pdata = msrs->vmcs_enum;
                break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:
                *pdata = vmx_control_msr(
                        msrs->secondary_ctls_low,
                        msrs->secondary_ctls_high);
                break;
        case MSR_IA32_VMX_EPT_VPID_CAP:
                *pdata = msrs->ept_caps |
                        ((u64)msrs->vpid_caps << 32);
                break;
        case MSR_IA32_VMX_VMFUNC:
                *pdata = msrs->vmfunc_controls;
                break;
        default:
                return 1;
        }

        return 0;
}
/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
 * been modified by the L1 guest.  Note, "writable" in this context means
 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
 * VM-exit information fields (which are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS").
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
        struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
        struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
        struct shadow_vmcs_field field;
        unsigned long val;
        int i;

        if (WARN_ON(!shadow_vmcs))
                return;

        preempt_disable();

        vmcs_load(shadow_vmcs);

        for (i = 0; i < max_shadow_read_write_fields; i++) {
                field = shadow_read_write_fields[i];
                val = __vmcs_readl(field.encoding);
                vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
        }

        vmcs_clear(shadow_vmcs);
        vmcs_load(vmx->loaded_vmcs->vmcs);

        preempt_enable();
}
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
        const struct shadow_vmcs_field *fields[] = {
                shadow_read_write_fields,
                shadow_read_only_fields
        };
        const int max_fields[] = {
                max_shadow_read_write_fields,
                max_shadow_read_only_fields
        };
        struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
        struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
        struct shadow_vmcs_field field;
        unsigned long val;
        int i, q;

        if (WARN_ON(!shadow_vmcs))
                return;

        vmcs_load(shadow_vmcs);

        for (q = 0; q < ARRAY_SIZE(fields); q++) {
                for (i = 0; i < max_fields[q]; i++) {
                        field = fields[q][i];
                        val = vmcs12_read_any(vmcs12, field.encoding,
                                              field.offset);
                        __vmcs_writel(field.encoding, val);
                }
        }

        vmcs_clear(shadow_vmcs);
        vmcs_load(vmx->loaded_vmcs->vmcs);
}
static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
{
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
        struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

        /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
        vmcs12->tpr_threshold = evmcs->tpr_threshold;
        vmcs12->guest_rip = evmcs->guest_rip;

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
                vmcs12->guest_rsp = evmcs->guest_rsp;
                vmcs12->guest_rflags = evmcs->guest_rflags;
                vmcs12->guest_interruptibility_info =
                        evmcs->guest_interruptibility_info;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
                vmcs12->cpu_based_vm_exec_control =
                        evmcs->cpu_based_vm_exec_control;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
                vmcs12->exception_bitmap = evmcs->exception_bitmap;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
                vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
                vmcs12->vm_entry_intr_info_field =
                        evmcs->vm_entry_intr_info_field;
                vmcs12->vm_entry_exception_error_code =
                        evmcs->vm_entry_exception_error_code;
                vmcs12->vm_entry_instruction_len =
                        evmcs->vm_entry_instruction_len;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
                vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
                vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
                vmcs12->host_cr0 = evmcs->host_cr0;
                vmcs12->host_cr3 = evmcs->host_cr3;
                vmcs12->host_cr4 = evmcs->host_cr4;
                vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
                vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
                vmcs12->host_rip = evmcs->host_rip;
                vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
                vmcs12->host_es_selector = evmcs->host_es_selector;
                vmcs12->host_cs_selector = evmcs->host_cs_selector;
                vmcs12->host_ss_selector = evmcs->host_ss_selector;
                vmcs12->host_ds_selector = evmcs->host_ds_selector;
                vmcs12->host_fs_selector = evmcs->host_fs_selector;
                vmcs12->host_gs_selector = evmcs->host_gs_selector;
                vmcs12->host_tr_selector = evmcs->host_tr_selector;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
                vmcs12->pin_based_vm_exec_control =
                        evmcs->pin_based_vm_exec_control;
                vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
                vmcs12->secondary_vm_exec_control =
                        evmcs->secondary_vm_exec_control;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
                vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
                vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
                vmcs12->msr_bitmap = evmcs->msr_bitmap;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
                vmcs12->guest_es_base = evmcs->guest_es_base;
                vmcs12->guest_cs_base = evmcs->guest_cs_base;
                vmcs12->guest_ss_base = evmcs->guest_ss_base;
                vmcs12->guest_ds_base = evmcs->guest_ds_base;
                vmcs12->guest_fs_base = evmcs->guest_fs_base;
                vmcs12->guest_gs_base = evmcs->guest_gs_base;
                vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
                vmcs12->guest_tr_base = evmcs->guest_tr_base;
                vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
                vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
                vmcs12->guest_es_limit = evmcs->guest_es_limit;
                vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
                vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
                vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
                vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
                vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
                vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
                vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
                vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
                vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
                vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
                vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
                vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
                vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
                vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
                vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
                vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
                vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
                vmcs12->guest_es_selector = evmcs->guest_es_selector;
                vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
                vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
                vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
                vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
                vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
                vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
                vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
                vmcs12->tsc_offset = evmcs->tsc_offset;
                vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
                vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
                vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
                vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
                vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
                vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
                vmcs12->guest_cr0 = evmcs->guest_cr0;
                vmcs12->guest_cr3 = evmcs->guest_cr3;
                vmcs12->guest_cr4 = evmcs->guest_cr4;
                vmcs12->guest_dr7 = evmcs->guest_dr7;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
                vmcs12->host_fs_base = evmcs->host_fs_base;
                vmcs12->host_gs_base = evmcs->host_gs_base;
                vmcs12->host_tr_base = evmcs->host_tr_base;
                vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
                vmcs12->host_idtr_base = evmcs->host_idtr_base;
                vmcs12->host_rsp = evmcs->host_rsp;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
                vmcs12->ept_pointer = evmcs->ept_pointer;
                vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
        }

        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
                vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
                vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
                vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
                vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
                vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
                vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
                vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
                vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
                vmcs12->guest_pending_dbg_exceptions =
                        evmcs->guest_pending_dbg_exceptions;
                vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
                vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
                vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
                vmcs12->guest_activity_state = evmcs->guest_activity_state;
                vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
        }

        /*
         * Not used?
         * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
         * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
         * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
         * vmcs12->page_fault_error_code_mask =
         *              evmcs->page_fault_error_code_mask;
         * vmcs12->page_fault_error_code_match =
         *              evmcs->page_fault_error_code_match;
         * vmcs12->cr3_target_count = evmcs->cr3_target_count;
         * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
         * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
         * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
         */

        /*
         * Read only fields:
         * vmcs12->guest_physical_address = evmcs->guest_physical_address;
         * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
         * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
         * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
         * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
         * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
         * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
         * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
         * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
         * vmcs12->exit_qualification = evmcs->exit_qualification;
         * vmcs12->guest_linear_address = evmcs->guest_linear_address;
         *
         * Not present in struct vmcs12:
         * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
         * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
         * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
         * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
         */

        return;
}
static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
        struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

        /*
         * Should not be changed by KVM:
         *
         * evmcs->host_es_selector = vmcs12->host_es_selector;
         * evmcs->host_cs_selector = vmcs12->host_cs_selector;
         * evmcs->host_ss_selector = vmcs12->host_ss_selector;
         * evmcs->host_ds_selector = vmcs12->host_ds_selector;
         * evmcs->host_fs_selector = vmcs12->host_fs_selector;
         * evmcs->host_gs_selector = vmcs12->host_gs_selector;
         * evmcs->host_tr_selector = vmcs12->host_tr_selector;
         * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
         * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
         * evmcs->host_cr0 = vmcs12->host_cr0;
         * evmcs->host_cr3 = vmcs12->host_cr3;
         * evmcs->host_cr4 = vmcs12->host_cr4;
         * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
         * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
         * evmcs->host_rip = vmcs12->host_rip;
         * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
         * evmcs->host_fs_base = vmcs12->host_fs_base;
         * evmcs->host_gs_base = vmcs12->host_gs_base;
         * evmcs->host_tr_base = vmcs12->host_tr_base;
         * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
         * evmcs->host_idtr_base = vmcs12->host_idtr_base;
         * evmcs->host_rsp = vmcs12->host_rsp;
         * sync_vmcs02_to_vmcs12() doesn't read these:
         * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
         * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
         * evmcs->msr_bitmap = vmcs12->msr_bitmap;
         * evmcs->ept_pointer = vmcs12->ept_pointer;
         * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
         * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
         * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
         * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
         * evmcs->tpr_threshold = vmcs12->tpr_threshold;
         * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
         * evmcs->exception_bitmap = vmcs12->exception_bitmap;
         * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
         * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
         * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
         * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
         * evmcs->page_fault_error_code_mask =
         *              vmcs12->page_fault_error_code_mask;
         * evmcs->page_fault_error_code_match =
         *              vmcs12->page_fault_error_code_match;
         * evmcs->cr3_target_count = vmcs12->cr3_target_count;
         * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
         * evmcs->tsc_offset = vmcs12->tsc_offset;
         * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
         * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
         * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
         * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
         * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
         * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
         * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
         * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
         *
         * Not present in struct vmcs12:
         * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
         * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
         * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
         * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
         */

        evmcs->guest_es_selector = vmcs12->guest_es_selector;
        evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
        evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
        evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
        evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
        evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
        evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
        evmcs->guest_tr_selector = vmcs12->guest_tr_selector;

        evmcs->guest_es_limit = vmcs12->guest_es_limit;
        evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
        evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
        evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
        evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
        evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
        evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
        evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
        evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
        evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;

        evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
        evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
        evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
        evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
        evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
        evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
        evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
        evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;

        evmcs->guest_es_base = vmcs12->guest_es_base;
        evmcs->guest_cs_base = vmcs12->guest_cs_base;
        evmcs->guest_ss_base = vmcs12->guest_ss_base;
        evmcs->guest_ds_base = vmcs12->guest_ds_base;
        evmcs->guest_fs_base = vmcs12->guest_fs_base;
        evmcs->guest_gs_base = vmcs12->guest_gs_base;
        evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
        evmcs->guest_tr_base = vmcs12->guest_tr_base;
        evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
        evmcs->guest_idtr_base = vmcs12->guest_idtr_base;

        evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
        evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;

        evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
        evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
        evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
        evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;

        evmcs->guest_pending_dbg_exceptions =
                vmcs12->guest_pending_dbg_exceptions;
        evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
        evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;

        evmcs->guest_activity_state = vmcs12->guest_activity_state;
        evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;

        evmcs->guest_cr0 = vmcs12->guest_cr0;
        evmcs->guest_cr3 = vmcs12->guest_cr3;
        evmcs->guest_cr4 = vmcs12->guest_cr4;
        evmcs->guest_dr7 = vmcs12->guest_dr7;

        evmcs->guest_physical_address = vmcs12->guest_physical_address;

        evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
        evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
        evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
        evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
        evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
        evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
        evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
        evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;

        evmcs->exit_qualification = vmcs12->exit_qualification;

        evmcs->guest_linear_address = vmcs12->guest_linear_address;
        evmcs->guest_rsp = vmcs12->guest_rsp;
        evmcs->guest_rflags = vmcs12->guest_rflags;

        evmcs->guest_interruptibility_info =
                vmcs12->guest_interruptibility_info;
        evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
        evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
        evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
        evmcs->vm_entry_exception_error_code =
                vmcs12->vm_entry_exception_error_code;
        evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;

        evmcs->guest_rip = vmcs12->guest_rip;

        evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;

        return;
}
/*
 * This is an equivalent of the nested hypervisor executing the vmptrld
 * instruction.
 */
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
	struct kvm_vcpu *vcpu, bool from_launch)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool evmcs_gpa_changed = false;
	u64 evmcs_gpa;

	if (likely(!vmx->nested.enlightened_vmcs_enabled))
		return EVMPTRLD_DISABLED;

	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
		nested_release_evmcs(vcpu);
		return EVMPTRLD_DISABLED;
	}

	if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
		vmx->nested.current_vmptr = -1ull;

		nested_release_evmcs(vcpu);

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
				 &vmx->nested.hv_evmcs_map))
			return EVMPTRLD_ERROR;

		vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;

		/*
		 * Currently, KVM only supports eVMCS version 1
		 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set
		 * the first u32 field of the eVMCS, which should specify the
		 * eVMCS VersionNumber, to this value.
		 *
		 * The guest should learn the eVMCS versions supported by the
		 * host by examining CPUID.0x4000000A.EAX[0:15]. The host
		 * userspace VMM is expected to set this CPUID leaf according
		 * to the value returned in vmcs_version from
		 * nested_enable_evmcs().
		 *
		 * However, it turns out that Microsoft Hyper-V fails to comply
		 * with its own invented interface: when Hyper-V uses eVMCS, it
		 * just sets the first u32 field of the eVMCS to the
		 * revision_id specified in MSR_IA32_VMX_BASIC, instead of an
		 * eVMCS version number, which is one of the supported versions
		 * specified in CPUID.0x4000000A.EAX[0:15].
		 *
		 * To overcome this Hyper-V bug, accept either a supported
		 * eVMCS version or the VMCS12 revision_id as valid values for
		 * the first u32 field of the eVMCS.
		 */
		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
			nested_release_evmcs(vcpu);
			return EVMPTRLD_VMFAIL;
		}

		vmx->nested.hv_evmcs_vmptr = evmcs_gpa;

		evmcs_gpa_changed = true;
		/*
		 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
		 * reloaded from guest memory (read-only fields, fields not
		 * present in struct hv_enlightened_vmcs, ...). Make sure there
		 * are no leftovers.
		 */
		if (from_launch) {
			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

			memset(vmcs12, 0, sizeof(*vmcs12));
			vmcs12->hdr.revision_id = VMCS12_REVISION;
		}
	}

	/*
	 * Clean fields data can't be used on VMLAUNCH and when we switch
	 * between different L2 guests as KVM keeps a single VMCS12 per L1.
	 */
	if (from_launch || evmcs_gpa_changed)
		vmx->nested.hv_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;

	return EVMPTRLD_SUCCEEDED;
}
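
/*
 * Flush vmcs12 content back to the shadow VMCS (or to the enlightened
 * VMCS, when one is in use) so that L1 can observe the updated fields.
 */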
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		copy_vmcs12_to_enlightened(vmx);
	else
		copy_vmcs12_to_shadow(vmx);

	vmx->nested.need_vmcs12_to_shadow_sync = false;
}
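
/*
 * hrtimer callback for the emulated VMX-preemption timer: mark the timer
 * as expired and kick the vCPU so the pending event is evaluated.
 */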
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}
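
/*
 * Compute the remaining emulated VMX-preemption timer value in units of
 * the L1 TSC shifted by the emulated timer rate.
 */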
static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
			VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;

	if (!vmx->nested.has_preemption_timer_deadline) {
		vmx->nested.preemption_timer_deadline =
			vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
		vmx->nested.has_preemption_timer_deadline = true;
	}
	return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
}
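
/*
 * Arm the hrtimer that emulates the VMX-preemption timer for L2; the
 * timeout is converted from scaled TSC ticks to nanoseconds using
 * vcpu->arch.virtual_tsc_khz.
 */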
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
					u64 preemption_timeout)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * A timer value of zero is architecturally guaranteed to cause
	 * a VMExit prior to executing any instructions in the guest.
	 */
	if (preemption_timeout == 0) {
		vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
		return;
	}

	if (vcpu->arch.virtual_tsc_khz == 0)
		return;

	preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
	preemption_timeout *= 1000000;
	do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
	hrtimer_start(&vmx->nested.preemption_timer,
		      ktime_add_ns(ktime_get(), preemption_timeout),
		      HRTIMER_MODE_ABS_PINNED);
}
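
/*
 * Derive the EFER value L2 should run with: from vmcs12 when the VM-entry
 * controls say to load IA32_EFER, otherwise from L1's EFER with LMA/LME
 * forced to match the "IA-32e mode guest" entry control.
 */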
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		return vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
	else
		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
}
static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
{
	/*
	 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
	 * according to L0's settings (vmcs12 is irrelevant here).  Host
	 * fields that come from L0 and are not constant, e.g. HOST_CR3,
	 * will be set as needed prior to VMLAUNCH/VMRESUME.
	 */
	if (vmx->nested.vmcs02_initialized)
		return;
	vmx->nested.vmcs02_initialized = true;

	/*
	 * We don't care what the EPTP value is; we just need to guarantee
	 * it's valid so we don't get a false positive when doing early
	 * consistency checks.
	 */
	if (enable_ept && nested_early_check)
		vmcs_write64(EPT_POINTER,
			     construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));

	/* All VMFUNCs are currently emulated through L0 vmexits. */
	if (cpu_has_vmx_vmfunc())
		vmcs_write64(VM_FUNCTION_CONTROL, 0);

	if (cpu_has_vmx_posted_intr())
		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));

	/*
	 * PML is emulated for L2, but never enabled in hardware as the MMU
	 * handles A/D emulation.  Disabling PML for L2 also avoids having to
	 * deal with filtering out L2 GPAs from the buffer.
	 */
	if (enable_pml) {
		vmcs_write64(PML_ADDRESS, 0);
		vmcs_write16(GUEST_PML_INDEX, -1);
	}

	if (cpu_has_vmx_encls_vmexit())
		vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);

	/*
	 * Set the MSR load/store lists to match L0's settings.  Only the
	 * addresses are constant (for vmcs02), the counts can change based
	 * on L2's behavior, e.g. switching to/from long mode.
	 */
	vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));

	vmx_set_constant_host_state(vmx);
}
static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
				      struct vmcs12 *vmcs12)
{
	prepare_vmcs02_constant_state(vmx);

	vmcs_write64(VMCS_LINK_POINTER, -1ull);

	if (enable_vpid) {
		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
		else
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
	}
}
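
/*
 * Merge L0's (KVM's) and L1's (vmcs12's) desires into the vmcs02 control
 * fields that must be up to date early during emulation of VM-Enter.
 */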
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
				 struct vmcs12 *vmcs12)
{
	u32 exec_control;
	u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);

	if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		prepare_vmcs02_early_rare(vmx, vmcs12);

	/*
	 * PIN CONTROLS
	 */
	exec_control = __pin_controls_get(vmcs01);
	exec_control |= (vmcs12->pin_based_vm_exec_control &
			 ~PIN_BASED_VMX_PREEMPTION_TIMER);

	/* Posted interrupts setting is only taken from vmcs12. */
	vmx->nested.pi_pending = false;
	if (nested_cpu_has_posted_intr(vmcs12))
		vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
	else
		exec_control &= ~PIN_BASED_POSTED_INTR;
	pin_controls_set(vmx, exec_control);

	/*
	 * EXEC CONTROLS
	 */
	exec_control = __exec_controls_get(vmcs01); /* L0's desires */
	exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
	exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
	exec_control &= ~CPU_BASED_TPR_SHADOW;
	exec_control |= vmcs12->cpu_based_vm_exec_control;

	vmx->nested.l1_tpr_threshold = -1;
	if (exec_control & CPU_BASED_TPR_SHADOW)
		vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
#ifdef CONFIG_X86_64
	else
		exec_control |= CPU_BASED_CR8_LOAD_EXITING |
				CPU_BASED_CR8_STORE_EXITING;
#endif

	/*
	 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
	 * for I/O port accesses.
	 */
	exec_control |= CPU_BASED_UNCOND_IO_EXITING;
	exec_control &= ~CPU_BASED_USE_IO_BITMAPS;

	/*
	 * This bit will be computed in nested_get_vmcs12_pages, because
	 * we do not have access to L1's MSR bitmap yet.  For now, keep
	 * the same bit as before, hoping to avoid multiple VMWRITEs that
	 * only set/clear this bit.
	 */
	exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
	exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;

	exec_controls_set(vmx, exec_control);

	/*
	 * SECONDARY EXEC CONTROLS
	 */
	if (cpu_has_secondary_exec_ctrls()) {
		exec_control = __secondary_exec_controls_get(vmcs01);

		/* Take the following fields only from vmcs12 */
		exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
				  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
				  SECONDARY_EXEC_ENABLE_INVPCID |
				  SECONDARY_EXEC_ENABLE_RDTSCP |
				  SECONDARY_EXEC_XSAVES |
				  SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
				  SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_ENABLE_VMFUNC |
				  SECONDARY_EXEC_TSC_SCALING |
				  SECONDARY_EXEC_DESC);

		if (nested_cpu_has(vmcs12,
				   CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
			exec_control |= vmcs12->secondary_vm_exec_control;

		/* PML is emulated and never enabled in hardware for L2. */
		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;

		/* VMCS shadowing for L2 is emulated for now */
		exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;

		/*
		 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
		 * will not have to rewrite the controls just for this bit.
		 */
		if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
		    (vmcs12->guest_cr4 & X86_CR4_UMIP))
			exec_control |= SECONDARY_EXEC_DESC;

		if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
			vmcs_write16(GUEST_INTR_STATUS,
				     vmcs12->guest_intr_status);

		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
			exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;

		if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
			vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);

		secondary_exec_controls_set(vmx, exec_control);
	}

	/*
	 * ENTRY CONTROLS
	 *
	 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
	 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
	 * on the related bits (if supported by the CPU) in the hope that
	 * we can avoid VMWrites during vmx_set_efer().
	 */
	exec_control = __vm_entry_controls_get(vmcs01);
	exec_control |= vmcs12->vm_entry_controls;
	exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
	if (cpu_has_load_ia32_efer()) {
		if (guest_efer & EFER_LMA)
			exec_control |= VM_ENTRY_IA32E_MODE;
		if (guest_efer != host_efer)
			exec_control |= VM_ENTRY_LOAD_IA32_EFER;
	}
	vm_entry_controls_set(vmx, exec_control);

	/*
	 * EXIT CONTROLS
	 *
	 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
	 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
	 */
	exec_control = __vm_exit_controls_get(vmcs01);
	if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
		exec_control |= VM_EXIT_LOAD_IA32_EFER;
	else
		exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
	vm_exit_controls_set(vmx, exec_control);

	/*
	 * Interrupt/Exception Fields
	 */
	if (vmx->nested.nested_run_pending) {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     vmcs12->vm_entry_intr_info_field);
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
			     vmcs12->vm_entry_exception_error_code);
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmcs12->vm_entry_instruction_len);
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     vmcs12->guest_interruptibility_info);
		vmx->loaded_vmcs->nmi_known_unmasked =
			!(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
	} else {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
	}
}
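
/*
 * Propagate the rarely-changing vmcs12 guest fields into vmcs02; each group
 * is skipped when the Enlightened VMCS clean-fields bits say it is
 * unchanged.
 */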
static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;

	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
		vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
		vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
		vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
		vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
		vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
		vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
		vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
		vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
		vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
		vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
		vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
		vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
		vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
		vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
		vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
		vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
		vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
		vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
		vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
		vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
		vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
		vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
		vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
		vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
		vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
		vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
		vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
		vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
		vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
		vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
		vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
		vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
		vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
		vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
		vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
		vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);

		vmx->segment_cache.bitmask = 0;
	}

	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
		vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
		vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
			    vmcs12->guest_pending_dbg_exceptions);
		vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
		vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);

		/*
		 * L1 may access L2's PDPTRs, so save them to construct
		 * vmcs12.
		 */
		if (enable_ept) {
			vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
			vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
			vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
			vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
		}

		if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
		    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
			vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
	}

	if (nested_cpu_has_xsaves(vmcs12))
		vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);

	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
	 * doesn't care about page faults then we should set all of these to
	 * L1's desires. However, if L0 does care about (some) page faults, it
	 * is not easy (if at all possible?) to merge L0 and L1's desires, so
	 * we simply ask to exit on each and every L2 page fault. This is done
	 * by setting MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 */
	if (vmx_need_pf_intercept(&vmx->vcpu)) {
		/*
		 * TODO: if both L0 and L1 need the same MASK and MATCH,
		 * go ahead and use it?
		 */
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	} else {
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
			     vmcs12->page_fault_error_code_mask);
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
			     vmcs12->page_fault_error_code_match);
	}

	if (cpu_has_vmx_apicv()) {
		vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
		vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
		vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
		vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
	}

	/*
	 * Make sure the msr_autostore list is up to date before we set the
	 * count in the vmcs02.
	 */
	prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	set_cr4_guest_host_mask(vmx);
}
/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
 * guest in a way that is appropriate both to L1's requests and to our
 * needs. In addition to modifying the active vmcs (which is vmcs02), this
 * function also has additional necessary side effects, like setting various
 * vcpu->arch fields.
 * Returns 0 on success and -EINVAL on failure; the invalid-state exit
 * qualification code is stored in *entry_failure_code on failure.
 */
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			  bool from_vmentry,
			  enum vm_entry_failure_code *entry_failure_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool load_guest_pdptrs_vmcs12 = false;

	if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
		prepare_vmcs02_rare(vmx, vmcs12);
		vmx->nested.dirty_vmcs12 = false;

		load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
			!(vmx->nested.hv_evmcs->hv_clean_fields &
			  HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
	}

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
	} else {
		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
	}
	if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
		vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
	vmx_set_rflags(vcpu, vmcs12->guest_rflags);

	/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
	vmx_update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
		vcpu->arch.pat = vmcs12->guest_ia32_pat;
	} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
	}

	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			vmx_get_l2_tsc_offset(vcpu),
			vmx_get_l2_tsc_multiplier(vcpu));

	vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
			vcpu->arch.l1_tsc_scaling_ratio,
			vmx_get_l2_tsc_multiplier(vcpu));

	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
	if (kvm_has_tsc_control)
		vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);

	nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);

	if (nested_cpu_has_ept(vmcs12))
		nested_ept_init_mmu_context(vcpu);

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
	 * bits which we consider mandatory enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may have
	 * more bits than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
	/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * Guest state is invalid and unrestricted guest is disabled,
	 * which means L1 attempted VMEntry to L2 with invalid state.
	 * Fail the VMEntry.
	 */
	if (CC(!vmx_guest_state_valid(vcpu))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/* Shadow page tables on either EPT or shadow page tables. */
	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
				from_vmentry, entry_failure_code))
		return -EINVAL;

	/*
	 * Immediately write vmcs02.GUEST_CR3.  It will be propagated to vmcs12
	 * on nested VM-Exit, which can occur without actually running L2 and
	 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
	 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
	 * transition to HLT instead of running L2.
	 */
	if (enable_ept)
		vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);

	/* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
	if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
	    is_pae_paging(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
	}

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
				     vmcs12->guest_ia32_perf_global_ctrl)))
		return -EINVAL;

	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
	kvm_rip_write(vcpu, vmcs12->guest_rip);

	/*
	 * It was observed that genuine Hyper-V running in L1 doesn't reset
	 * 'hv_clean_fields' by itself, it only sets the corresponding dirty
	 * bits when it changes a field in eVMCS. Mark all fields as clean
	 * here.
	 */
	if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		vmx->nested.hv_evmcs->hv_clean_fields |=
			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;

	return 0;
}
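
/*
 * Consistency checks on vmcs12's NMI controls: virtual NMIs require NMI
 * exiting, and NMI-window exiting requires virtual NMIs.
 */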
static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
{
	if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
	       nested_cpu_has_virtual_nmis(vmcs12)))
		return -EINVAL;

	if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
	       nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
		return -EINVAL;

	return 0;
}
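
/*
 * Validate an EPT pointer provided by L1: memory type, page-walk length,
 * reserved bits and (if set) the A/D-enable bit must all be supported.
 */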
static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Check for memory type validity */
	switch (new_eptp & VMX_EPTP_MT_MASK) {
	case VMX_EPTP_MT_UC:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
			return false;
		break;
	case VMX_EPTP_MT_WB:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
			return false;
		break;
	default:
		return false;
	}

	/* Page-walk levels validity. */
	switch (new_eptp & VMX_EPTP_PWL_MASK) {
	case VMX_EPTP_PWL_5:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
			return false;
		break;
	case VMX_EPTP_PWL_4:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
			return false;
		break;
	default:
		return false;
	}

	/* Reserved bits should not be set */
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
		return false;

	/* AD, if set, should be supported */
	if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
			return false;
	}

	return true;
}
/*
 * Checks related to VM-Execution Control Fields
 */
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
				   vmx->nested.msrs.pinbased_ctls_low,
				   vmx->nested.msrs.pinbased_ctls_high)) ||
	    CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
				   vmx->nested.msrs.procbased_ctls_low,
				   vmx->nested.msrs.procbased_ctls_high)))
		return -EINVAL;

	if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
	    CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
				   vmx->nested.msrs.secondary_ctls_low,
				   vmx->nested.msrs.secondary_ctls_high)))
		return -EINVAL;

	if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
	    nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
	    nested_vmx_check_nmi_controls(vmcs12) ||
	    nested_vmx_check_pml_controls(vcpu, vmcs12) ||
	    nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
	    nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
	    nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
	    CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
		return -EINVAL;

	if (!nested_cpu_has_preemption_timer(vmcs12) &&
	    nested_cpu_has_save_preemption_timer(vmcs12))
		return -EINVAL;

	if (nested_cpu_has_ept(vmcs12) &&
	    CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
		return -EINVAL;

	if (nested_cpu_has_vmfunc(vmcs12)) {
		if (CC(vmcs12->vm_function_control &
		       ~vmx->nested.msrs.vmfunc_controls))
			return -EINVAL;

		if (nested_cpu_has_eptp_switching(vmcs12)) {
			if (CC(!nested_cpu_has_ept(vmcs12)) ||
			    CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
				return -EINVAL;
		}
	}

	return 0;
}
/*
 * Checks related to VM-Exit Control Fields
 */
static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
				   vmx->nested.msrs.exit_ctls_low,
				   vmx->nested.msrs.exit_ctls_high)) ||
	    CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
		return -EINVAL;

	return 0;
}
/*
 * Checks related to VM-Entry Control Fields
 */
static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
				   vmx->nested.msrs.entry_ctls_low,
				   vmx->nested.msrs.entry_ctls_high)))
		return -EINVAL;

	/*
	 * From the Intel SDM, volume 3:
	 * Fields relevant to VM-entry event injection must be set properly.
	 * These fields are the VM-entry interruption-information field, the
	 * VM-entry exception error code, and the VM-entry instruction length.
	 */
	if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
		u32 intr_info = vmcs12->vm_entry_intr_info_field;
		u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
		u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
		bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
		bool should_have_error_code;
		bool urg = nested_cpu_has2(vmcs12,
					   SECONDARY_EXEC_UNRESTRICTED_GUEST);
		bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;

		/* VM-entry interruption-info field: interruption type */
		if (CC(intr_type == INTR_TYPE_RESERVED) ||
		    CC(intr_type == INTR_TYPE_OTHER_EVENT &&
		       !nested_cpu_supports_monitor_trap_flag(vcpu)))
			return -EINVAL;

		/* VM-entry interruption-info field: vector */
		if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
		    CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
		    CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
			return -EINVAL;

		/* VM-entry interruption-info field: deliver error code */
		should_have_error_code =
			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
			x86_exception_has_error_code(vector);
		if (CC(has_error_code != should_have_error_code))
			return -EINVAL;

		/* VM-entry exception error code */
		if (CC(has_error_code &&
		       vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
			return -EINVAL;

		/* VM-entry interruption-info field: reserved bits */
		if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
			return -EINVAL;

		/* VM-entry instruction length */
		switch (intr_type) {
		case INTR_TYPE_SOFT_EXCEPTION:
		case INTR_TYPE_SOFT_INTR:
		case INTR_TYPE_PRIV_SW_EXCEPTION:
			if (CC(vmcs12->vm_entry_instruction_len > 15) ||
			    CC(vmcs12->vm_entry_instruction_len == 0 &&
			    CC(!nested_cpu_has_zero_length_injection(vcpu))))
				return -EINVAL;
		}
	}

	if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
	    nested_check_vm_entry_controls(vcpu, vmcs12))
		return -EINVAL;

	if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
		return nested_evmcs_check_controls(vmcs12);

	return 0;
}
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	bool ia32e;

	if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
	    CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
	    CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
		return -EINVAL;

	if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
		return -EINVAL;

	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
	    CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
		return -EINVAL;

	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
					   vmcs12->host_ia32_perf_global_ctrl)))
		return -EINVAL;

#ifdef CONFIG_X86_64
	ia32e = !!(vcpu->arch.efer & EFER_LMA);
#else
	ia32e = false;
#endif

	if (ia32e) {
		if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
		    CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
			return -EINVAL;
	} else {
		if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
		    CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
		    CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
		    CC((vmcs12->host_rip) >> 32))
			return -EINVAL;
	}

	if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_cs_selector == 0) ||
	    CC(vmcs12->host_tr_selector == 0) ||
	    CC(vmcs12->host_ss_selector == 0 && !ia32e))
		return -EINVAL;

	if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
		return -EINVAL;

	/*
	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
	 * the values of the LMA and LME bits in the field must each be that of
	 * the host address-space size VM-exit control.
	 */
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
		if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
			return -EINVAL;
	}

	return 0;
}
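
/*
 * Validate vmcs12's VMCS link pointer by temporarily mapping the referenced
 * shadow VMCS and checking its revision ID and shadow-VMCS indicator.
 */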
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	int r = 0;
	struct vmcs12 *shadow;
	struct kvm_host_map map;

	if (vmcs12->vmcs_link_pointer == -1ull)
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
		return -EINVAL;

	if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
		return -EINVAL;

	shadow = map.hva;

	if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
	    CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
		r = -EINVAL;

	kvm_vcpu_unmap(vcpu, &map, false);
	return r;
}
/*
 * Checks related to Guest Non-register State
 */
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
{
	if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
	       vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT &&
	       vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12,
					enum vm_entry_failure_code *entry_failure_code)
{
	bool ia32e;

	*entry_failure_code = ENTRY_FAIL_DEFAULT;

	if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
	    CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
		return -EINVAL;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
	    CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
		return -EINVAL;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
	    CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
		return -EINVAL;

	if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
		*entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
		return -EINVAL;
	}

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
					   vmcs12->guest_ia32_perf_global_ctrl)))
		return -EINVAL;

	/*
	 * If the load IA32_EFER VM-entry control is 1, the following checks
	 * are performed on the field for the IA32_EFER MSR:
	 * - Bits reserved in the IA32_EFER MSR must be 0.
	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
	 *   the IA-32e mode guest VM-exit control. It must also be identical
	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
	 *   CR0.PG) is 1.
	 */
	if (to_vmx(vcpu)->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
		if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
		    CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
		    CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
		        ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
			return -EINVAL;
	}

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
	    (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
	     CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
		return -EINVAL;

	if (nested_check_guest_non_reg_state(vmcs12))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long cr3, cr4;
	bool vm_fail;

	if (!nested_early_check)
		return 0;

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	preempt_disable();

	vmx_prepare_switch_to_guest(vcpu);

	/*
	 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
	 * which is reserved to '1' by hardware.  GUEST_RFLAGS is guaranteed to
	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
	 * there is no need to preserve other bits or save/restore the field.
	 */
	vmcs_writel(GUEST_RFLAGS, 0);

	cr3 = __get_current_cr3_fast();
	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
		vmcs_writel(HOST_CR3, cr3);
		vmx->loaded_vmcs->host_state.cr3 = cr3;
	}

	cr4 = cr4_read_shadow();
	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
		vmcs_writel(HOST_CR4, cr4);
		vmx->loaded_vmcs->host_state.cr4 = cr4;
	}

	vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				 vmx->loaded_vmcs->launched);

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	if (vm_fail) {
		u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);

		preempt_enable();

		trace_kvm_nested_vmenter_failed(
			"early hardware check VM-instruction error: ", error);
		WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	/*
	 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
	 */
	local_irq_enable();
	if (hw_breakpoint_active())
		set_debugreg(__this_cpu_read(cpu_dr7), 7);
	preempt_enable();

	/*
	 * A non-failing VMEntry means we somehow entered guest mode with
	 * an illegal RIP, and that's just the tip of the iceberg.  There
	 * is no telling what memory has been modified or what state has
	 * been exposed to unknown code.  Hitting this all but guarantees
	 * a (very critical) hardware issue.
	 */
	WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
		VMX_EXIT_REASONS_FAILED_VMENTRY));

	return 0;
}
static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * hv_evmcs may end up not being mapped after migration (when
	 * L2 was running), map it here to make sure vmcs12 changes are
	 * properly reflected.
	 */
	if (vmx->nested.enlightened_vmcs_enabled &&
	    vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
		enum nested_evmptrld_status evmptrld_status =
			nested_vmx_handle_enlightened_vmptrld(vcpu, false);

		if (evmptrld_status == EVMPTRLD_VMFAIL ||
		    evmptrld_status == EVMPTRLD_ERROR)
			return false;

		/*
		 * Post-migration, the VMCS12 always provides the most
		 * up-to-date information, so copy it to the eVMCS upon entry.
		 */
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	}

	return true;
}
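
/*
 * Map the guest pages referenced by vmcs12 (APIC-access page, virtual-APIC
 * page, posted-interrupt descriptor) and plumb the resulting host physical
 * addresses into vmcs02.
 */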
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_host_map *map;
	struct page *page;
	u64 hpa;

	if (!vcpu->arch.pdptrs_from_userspace &&
	    !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
		/*
		 * Reload the guest's PDPTRs since after a migration
		 * the guest CR3 might be restored prior to setting the nested
		 * state, which can lead to loading the wrong PDPTRs.
		 */
		if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
			return false;
	}

	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		/*
		 * Translate L1 physical address to host physical
		 * address for vmcs02. Keep the page pinned, so this
		 * physical address remains valid. We keep a reference
		 * to it so we can release it later.
		 */
		if (vmx->nested.apic_access_page) { /* shouldn't happen */
			kvm_release_page_clean(vmx->nested.apic_access_page);
			vmx->nested.apic_access_page = NULL;
		}
		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
		if (!is_error_page(page)) {
			vmx->nested.apic_access_page = page;
			hpa = page_to_phys(vmx->nested.apic_access_page);
			vmcs_write64(APIC_ACCESS_ADDR, hpa);
		} else {
			pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
					     __func__);
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			vcpu->run->internal.suberror =
				KVM_INTERNAL_ERROR_EMULATION;
			vcpu->run->internal.ndata = 0;
			return false;
		}
	}

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		map = &vmx->nested.virtual_apic_map;

		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
		} else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
			   nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
			   !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
			/*
			 * The processor will never use the TPR shadow, simply
			 * clear the bit from the execution control.  Such a
			 * configuration is useless, but it happens in tests.
			 * For any other configuration, failing the vm entry is
			 * _not_ what the processor does but it's basically the
			 * only possibility we have.
			 */
			exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
		} else {
			/*
			 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
			 * force VM-Entry to fail.
			 */
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
		}
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		map = &vmx->nested.pi_desc_map;

		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
			vmx->nested.pi_desc =
				(struct pi_desc *)(((void *)map->hva) +
				offset_in_page(vmcs12->posted_intr_desc_addr));
			vmcs_write64(POSTED_INTR_DESC_ADDR,
				     pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
		} else {
			/*
			 * Defer the KVM_INTERNAL_EXIT until KVM tries to
			 * access the contents of the VMCS12 posted interrupt
			 * descriptor. (Note that KVM may do this when it
			 * should not, per the architectural specification.)
			 */
			vmx->nested.pi_desc = NULL;
			pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
		}
	}
	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
		exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
	else
		exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);

	return true;
}
static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	if (!nested_get_evmcs_page(vcpu)) {
		pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
				     __func__);
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;

		return false;
	}

	if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
		return false;

	return true;
}
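
/*
 * Emulate PML for L2 on behalf of L1: log the dirtied GPA into the vmcs12
 * PML buffer, flagging a page-modification log-full event once the guest's
 * PML index is exhausted.
 */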
static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t dst;

	if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
		return 0;

	if (WARN_ON_ONCE(vmx->nested.pml_full))
		return 1;

	/*
	 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
	 * set is already checked as part of A/D emulation.
	 */
	vmcs12 = get_vmcs12(vcpu);
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
		vmx->nested.pml_full = true;
		return 1;
	}

	gpa &= ~0xFFFull;
	dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;

	if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
				 offset_in_page(dst), sizeof(gpa)))
		return 0;

	vmcs12->guest_pml_index--;

	return 0;
}
/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for running VMX instructions (except VMXON, whose prerequisites are
 * slightly different). It also specifies what exception to inject otherwise.
 * Note that many of these exceptions have priority over VM exits, so they
 * don't have to be checked again here.
 */
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	if (!to_vmx(vcpu)->nested.vmxon) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 0;
	}

	return 1;
}
static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
{
	u8 rvi = vmx_get_rvi();
	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);

	return ((rvi & 0xf0) > (vppr & 0xf0));
}
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12);
/*
 * If from_vmentry is false, this is being called from state restore (either RSM
 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
 *
 * Returns:
 *	NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
 *	NVMX_VMENTRY_VMFAIL:  Consistency check VMFail
 *	NVMX_VMENTRY_VMEXIT:  Consistency check VMExit
 *	NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
 */
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
							bool from_vmentry)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	enum vm_entry_failure_code entry_failure_code;
	bool evaluate_pending_interrupts;
	union vmx_exit_reason exit_reason = {
		.basic = EXIT_REASON_INVALID_STATE,
		.failed_vmentry = 1,
	};
	u32 failed_index;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_vcpu_flush_tlb_current(vcpu);

	evaluate_pending_interrupts = exec_controls_get(vmx) &
		(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);

	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	if (kvm_mpx_supported() &&
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
		vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

	/*
	 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
	 * nested early checks are disabled.  In the event of a "late" VM-Fail,
	 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
	 * software model to the pre-VMEntry host state.  When EPT is disabled,
	 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
	 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3.  Stuffing
	 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
	 * the correct value.  Smashing vmcs01.GUEST_CR3 is safe because nested
	 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
	 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
	 * L1.  Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
	 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
	 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
	 * path would need to manually save/restore vmcs01.GUEST_CR3.
	 */
	if (!enable_ept && !nested_early_check)
		vmcs_writel(GUEST_CR3, vcpu->arch.cr3);

	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);

	prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);

	if (from_vmentry) {
		if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
			return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
		}

		if (nested_vmx_check_vmentry_hw(vcpu)) {
			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
			return NVMX_VMENTRY_VMFAIL;
		}

		if (nested_vmx_check_guest_state(vcpu, vmcs12,
						 &entry_failure_code)) {
			exit_reason.basic = EXIT_REASON_INVALID_STATE;
			vmcs12->exit_qualification = entry_failure_code;
			goto vmentry_fail_vmexit;
		}
	}

	enter_guest_mode(vcpu);

	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
		exit_reason.basic = EXIT_REASON_INVALID_STATE;
		vmcs12->exit_qualification = entry_failure_code;
		goto vmentry_fail_vmexit_guest_mode;
	}

	if (from_vmentry) {
		failed_index = nested_vmx_load_msr(vcpu,
						   vmcs12->vm_entry_msr_load_addr,
						   vmcs12->vm_entry_msr_load_count);
		if (failed_index) {
			exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
			vmcs12->exit_qualification = failed_index;
			goto vmentry_fail_vmexit_guest_mode;
		}
	} else {
		/*
		 * The MMU is not initialized to point at the right entities yet and
		 * "get pages" would need to read data from the guest (i.e. we will
		 * need to perform gpa to hpa translation). Request a call
		 * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
		 * have already been set at vmentry time and should not be reset.
		 */
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	}

	/*
	 * If L1 had a pending IRQ/NMI until it executed
	 * VMLAUNCH/VMRESUME which wasn't delivered because it was
	 * disallowed (e.g. interrupts disabled), L0 needs to
	 * evaluate if this pending event should cause an exit from L2
	 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
	 * intercept EXTERNAL_INTERRUPT).
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request, or checking RVI during evaluation of
	 * pending virtual interrupts.  However, this setting was done
	 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
	 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
	 */
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, vcpu);

	/*
	 * Do not start the preemption timer hrtimer until after we know
	 * we are successful, so that only nested_vmx_vmexit needs to cancel
	 * the timer.
	 */
	vmx->nested.preemption_timer_expired = false;
	if (nested_cpu_has_preemption_timer(vmcs12)) {
		u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
		vmx_start_preemption_timer(vcpu, timer_value);
	}

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return NVMX_VMENTRY_SUCCESS;

	/*
	 * A failed consistency check that leads to a VMExit during L1's
	 * VMEnter to L2 is a variation of a normal VMexit, as explained in
	 * 26.7 "VM-entry failures during or after loading guest state".
	 */
vmentry_fail_vmexit_guest_mode:
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
	leave_guest_mode(vcpu);

vmentry_fail_vmexit:
	vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!from_vmentry)
		return NVMX_VMENTRY_VMEXIT;

	load_vmcs12_host_state(vcpu, vmcs12);
	vmcs12->vm_exit_reason = exit_reason.full;
	if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	return NVMX_VMENTRY_VMEXIT;
}

/*
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
	struct vmcs12 *vmcs12;
	enum nvmx_vmentry_status status;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
	enum nested_evmptrld_status evmptrld_status;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
	if (evmptrld_status == EVMPTRLD_ERROR) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	} else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) {
		return nested_vmx_failInvalid(vcpu);
	}

	if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
	       vmx->nested.current_vmptr == -1ull))
		return nested_vmx_failInvalid(vcpu);

	vmcs12 = get_vmcs12(vcpu);

	/*
	 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
	 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
	 * rather than RFLAGS.ZF, and no error number is stored to the
	 * VM-instruction error field.
	 */
	if (CC(vmcs12->hdr.shadow_vmcs))
		return nested_vmx_failInvalid(vcpu);

	if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
		copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
		/* Enlightened VMCS doesn't have launch state */
		vmcs12->launch_state = !launch;
	} else if (enable_shadow_vmcs) {
		copy_shadow_to_vmcs12(vmx);
	}

	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and acting appropriately when
	 * they fail: As the SDM explains, some conditions should cause the
	 * instruction to fail, while others will cause the instruction to seem
	 * to succeed, but return an EXIT_REASON_INVALID_STATE.
	 * To speed up the normal (success) code path, we should avoid checking
	 * for misconfigurations which will anyway be caught by the processor
	 * when using the merged vmcs02.
	 */
	if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
		return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);

	if (CC(vmcs12->launch_state == launch))
		return nested_vmx_fail(vcpu,
			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);

	if (nested_vmx_check_controls(vcpu, vmcs12))
		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	if (nested_vmx_check_host_state(vcpu, vmcs12))
		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);

	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */
	vmx->nested.nested_run_pending = 1;
	vmx->nested.has_preemption_timer_deadline = false;
	status = nested_vmx_enter_non_root_mode(vcpu, true);
	if (unlikely(status != NVMX_VMENTRY_SUCCESS))
		goto vmentry_failed;

	/* Emulate processing of posted interrupts on VM-Enter. */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
		vmx->nested.pi_pending = true;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
	}

	/* Hide L1D cache contents from the nested guest.  */
	vmx->vcpu.arch.l1tf_flush_l1d = true;

	/*
	 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
	 * also be used as part of restoring nVMX state for
	 * snapshot restore (migration).
	 *
	 * In this flow, it is assumed that vmcs12 cache was
	 * transferred as part of captured nVMX state and should
	 * therefore not be read from guest memory (which may not
	 * exist on destination host yet).
	 */
	nested_cache_shadow_vmcs12(vcpu, vmcs12);

	switch (vmcs12->guest_activity_state) {
	case GUEST_ACTIVITY_HLT:
		/*
		 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
		 * awakened by event injection or by an NMI-window VM-exit or
		 * by an interrupt-window VM-exit, halt the vcpu.
		 */
		if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
		    !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) &&
		    !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
		      (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
			vmx->nested.nested_run_pending = 0;
			return kvm_vcpu_halt(vcpu);
		}
		break;
	case GUEST_ACTIVITY_WAIT_SIPI:
		vmx->nested.nested_run_pending = 0;
		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		break;
	default:
		break;
	}

	return 1;

vmentry_failed:
	vmx->nested.nested_run_pending = 0;
	if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
		return 0;
	if (status == NVMX_VMENTRY_VMEXIT)
		return 1;
	WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
	return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
}
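
/*
 * Editorial note on the failure plumbing above (a summary, not new logic):
 * nested_vmx_enter_non_root_mode() reports three failure flavors, which
 * nested_vmx_run() maps to three different outcomes:
 *   - NVMX_VMENTRY_KVM_INTERNAL_ERROR: return 0, i.e. exit to userspace;
 *   - NVMX_VMENTRY_VMEXIT: return 1, the nested VM-exit back to L1 has
 *     already been injected;
 *   - NVMX_VMENTRY_VMFAIL: emulate VMfail(VMXERR_ENTRY_INVALID_CONTROL_FIELD)
 *     so L1 sees the failure in RFLAGS and the VM-instruction error field.
 */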

/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *     didn't trap the bit, because if L1 did, so would L0).
 *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *     been modified by L2, and L1 knows it. So just leave the old value of
 *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *     isn't relevant, because if L0 traps this bit it can set it to anything.
 *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *     changed these bits, and therefore they need to be updated, but L0
 *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
 */
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
	/*3*/	(vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
			vcpu->arch.cr0_guest_owned_bits));
}

static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
	/*3*/	(vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
			vcpu->arch.cr4_guest_owned_bits));
}
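
/*
 * Worked example for the three-way merge above: suppose L1 traps CR0.TS
 * (the bit is set in vmcs12->cr0_guest_host_mask) while L0 additionally
 * traps CR0.MP.  Then TS is taken from vmcs12->guest_cr0 (case 2, L2
 * cannot have changed it), MP is taken from vmcs02's CR0_READ_SHADOW
 * (case 3, L0 virtualized it), and any bit trapped by neither comes
 * straight from vmcs02 GUEST_CR0 (case 1).
 */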

static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
				      struct vmcs12 *vmcs12)
{
	u32 idt_vectoring;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (kvm_exception_is_soft(nr)) {
			vmcs12->vm_exit_instruction_len =
				vcpu->arch.event_exit_inst_len;
			idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
		} else
			idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;

		if (vcpu->arch.exception.has_error_code) {
			idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
			vmcs12->idt_vectoring_error_code =
				vcpu->arch.exception.error_code;
		}

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	} else if (vcpu->arch.nmi_injected) {
		vmcs12->idt_vectoring_info_field =
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (vcpu->arch.interrupt.soft) {
			idt_vectoring |= INTR_TYPE_SOFT_INTR;
			vmcs12->vm_entry_instruction_len =
				vcpu->arch.event_exit_inst_len;
		} else
			idt_vectoring |= INTR_TYPE_EXT_INTR;

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	}
}
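
/*
 * For reference, the IDT-vectoring info encoding built above mirrors the
 * SDM's interruption-information format: bits 7:0 hold the vector,
 * bits 10:8 the event type (e.g. INTR_TYPE_SOFT_EXCEPTION), bit 11
 * (VECTORING_INFO_DELIVER_CODE_MASK) indicates an error code follows in
 * the separate error-code field, and bit 31 (VECTORING_INFO_VALID_MASK)
 * marks the field as valid.
 */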

void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	gfn_t gfn;

	/*
	 * Don't need to mark the APIC access page dirty; it is never
	 * written to by the CPU during APIC virtualization.
	 */

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}
}

static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int max_irr;
	void *vapic_page;
	u16 status;

	if (!vmx->nested.pi_pending)
		return 0;

	if (!vmx->nested.pi_desc)
		goto mmio_needed;

	vmx->nested.pi_pending = false;

	if (!pi_test_and_clear_on(vmx->nested.pi_desc))
		return 0;

	max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
	if (max_irr != 256) {
		vapic_page = vmx->nested.virtual_apic_map.hva;
		if (!vapic_page)
			goto mmio_needed;

		__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
			vapic_page, &max_irr);
		status = vmcs_read16(GUEST_INTR_STATUS);
		if ((u8)max_irr > ((u8)status & 0xff)) {
			status &= ~0xff;
			status |= (u8)max_irr;
			vmcs_write16(GUEST_INTR_STATUS, status);
		}
	}

	nested_mark_vmcs12_pages_dirty(vcpu);
	return 0;

mmio_needed:
	kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
	return -ENXIO;
}
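
/*
 * Note: GUEST_INTR_STATUS is a 16-bit field whose low byte is RVI
 * (requested virtual interrupt) and whose high byte is SVI (servicing
 * virtual interrupt); the code above only raises RVI, and only when the
 * highest bit found pending in the PIR exceeds the current RVI.
 */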

static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
					       unsigned long exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (vcpu->arch.exception.has_error_code) {
		vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (kvm_exception_is_soft(nr))
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
	    vmx_get_nmi_mask(vcpu))
		intr_info |= INTR_INFO_UNBLOCK_NMI;

	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
}

/*
 * Returns true if a debug trap is pending delivery.
 *
 * In KVM, debug traps bear an exception payload. As such, the class of a #DB
 * exception may be inferred from the presence of an exception payload.
 */
static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending &&
	       vcpu->arch.exception.nr == DB_VECTOR &&
	       vcpu->arch.exception.payload;
}

/*
 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
 * represents these debug traps with a payload that is said to be compatible
 * with the 'pending debug exceptions' field, write the payload to the VMCS
 * field if a VM-exit is delivered before the debug trap.
 */
static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
{
	if (vmx_pending_dbg_trap(vcpu))
		vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
			    vcpu->arch.exception.payload);
}

static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
	       to_vmx(vcpu)->nested.preemption_timer_expired;
}

static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qual;
	bool block_nested_events =
	    vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
	bool mtf_pending = vmx->nested.mtf_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	/*
	 * Clear the MTF state. If a higher priority VM-exit is delivered first,
	 * this state is discarded.
	 */
	if (!block_nested_events)
		vmx->nested.mtf_pending = false;

	if (lapic_in_kernel(vcpu) &&
		test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_update_pending_dbg(vcpu);
		clear_bit(KVM_APIC_INIT, &apic->pending_events);
		if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
			nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
		return 0;
	}

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_SIPI, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;

		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
			nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0,
						apic->sipi_vector & 0xFFUL);
		return 0;
	}

	/*
	 * Process any exceptions that are not debug traps before MTF.
	 *
	 * Note that only a pending nested run can block a pending exception.
	 * Otherwise an injected NMI/interrupt should either be
	 * lost or delivered to the nested hypervisor in the IDT_VECTORING_INFO,
	 * while delivering the pending exception.
	 */
	if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
		if (vmx->nested.nested_run_pending)
			return -EBUSY;
		if (!nested_vmx_check_exception(vcpu, &exit_qual))
			goto no_vmexit;
		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
		return 0;
	}

	if (mtf_pending) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_update_pending_dbg(vcpu);
		nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (vmx->nested.nested_run_pending)
			return -EBUSY;
		if (!nested_vmx_check_exception(vcpu, &exit_qual))
			goto no_vmexit;
		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
		return 0;
	}

	if (nested_vmx_preemption_timer_pending(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
		return 0;
	}

	if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		goto no_vmexit;
	}

	if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(vcpu))
			goto no_vmexit;

		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
				  INTR_INFO_VALID_MASK, 0);
		/*
		 * The NMI-triggered VM exit counts as injection:
		 * clear this one and block further NMIs.
		 */
		vcpu->arch.nmi_pending = 0;
		vmx_set_nmi_mask(vcpu, true);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(vcpu))
			goto no_vmexit;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
		return 0;
	}

no_vmexit:
	return vmx_complete_nested_posted_interrupt(vcpu);
}

static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	ktime_t remaining =
		hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
	u64 value;

	if (ktime_to_ns(remaining) <= 0)
		return 0;

	value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
	do_div(value, 1000000);
	return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}
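
/*
 * Worked example for the conversion above, assuming a hypothetical guest
 * with virtual_tsc_khz = 2,000,000 (a 2 GHz virtual TSC): 1,000,000 ns
 * remaining gives value = 1e6 * 2e6 / 1e6 = 2,000,000 TSC ticks, and the
 * final shift by VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE (5) yields
 * 62,500 preemption-timer units, i.e. the emulated timer ticks once per
 * 32 TSC cycles.
 */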

static bool is_vmcs12_ext_field(unsigned long field)
{
	switch (field) {
	case GUEST_ES_SELECTOR:
	case GUEST_CS_SELECTOR:
	case GUEST_SS_SELECTOR:
	case GUEST_DS_SELECTOR:
	case GUEST_FS_SELECTOR:
	case GUEST_GS_SELECTOR:
	case GUEST_LDTR_SELECTOR:
	case GUEST_TR_SELECTOR:
	case GUEST_ES_LIMIT:
	case GUEST_CS_LIMIT:
	case GUEST_SS_LIMIT:
	case GUEST_DS_LIMIT:
	case GUEST_FS_LIMIT:
	case GUEST_GS_LIMIT:
	case GUEST_LDTR_LIMIT:
	case GUEST_TR_LIMIT:
	case GUEST_GDTR_LIMIT:
	case GUEST_IDTR_LIMIT:
	case GUEST_ES_AR_BYTES:
	case GUEST_DS_AR_BYTES:
	case GUEST_FS_AR_BYTES:
	case GUEST_GS_AR_BYTES:
	case GUEST_LDTR_AR_BYTES:
	case GUEST_TR_AR_BYTES:
	case GUEST_ES_BASE:
	case GUEST_CS_BASE:
	case GUEST_SS_BASE:
	case GUEST_DS_BASE:
	case GUEST_FS_BASE:
	case GUEST_GS_BASE:
	case GUEST_LDTR_BASE:
	case GUEST_TR_BASE:
	case GUEST_GDTR_BASE:
	case GUEST_IDTR_BASE:
	case GUEST_PENDING_DBG_EXCEPTIONS:
	case GUEST_BNDCFGS:
		return true;
	default:
		break;
	}

	return false;
}

static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
	vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
	vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
	vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
	vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
	vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
	vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
	vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
	vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
	vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
	vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
	vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
	vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
	vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
	vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
	vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
	vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
	vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
	vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
	vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
	vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
	vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
	vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
	vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
	vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
	vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
	vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
	vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
	vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
	vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
	vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
	vmcs12->guest_pending_dbg_exceptions =
		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
	if (kvm_mpx_supported())
		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

	vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
}

static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
		return;

	WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);

	cpu = get_cpu();
	vmx->loaded_vmcs = &vmx->nested.vmcs02;
	vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);

	sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	vmx->loaded_vmcs = &vmx->vmcs01;
	vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
	put_cpu();
}
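
/*
 * The dance above is subtle: the rare fields live in the hardware vmcs02,
 * so the function temporarily makes vmcs02 the loaded VMCS (with
 * preemption disabled via get_cpu()) purely so that the vmcs_read*()
 * calls in sync_vmcs02_to_vmcs12_rare() read L2's values, then restores
 * vmcs01 as the loaded VMCS before returning.
 */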

/*
 * Update the guest state fields of vmcs12 to reflect changes that
 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
 * VM-entry controls is also updated, since this is really a guest
 * state bit.)
 */
static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
		!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);

	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);

	vmcs12->guest_rsp = kvm_rsp_read(vcpu);
	vmcs12->guest_rip = kvm_rip_read(vcpu);
	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);

	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
	vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);

	vmcs12->guest_interruptibility_info =
		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);

	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
		vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
	else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
		vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI;
	else
		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;

	if (nested_cpu_has_preemption_timer(vmcs12) &&
	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
	    !vmx->nested.nested_run_pending)
		vmcs12->vmx_preemption_timer_value =
			vmx_get_preemption_timer_value(vcpu);

	/*
	 * In some cases (usually, nested EPT), L2 is allowed to change its
	 * own CR3 without exiting. If it has changed it, we must keep it.
	 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
	 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 *
	 * Additionally, restore L2's PDPTR to vmcs12.
	 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
		if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
			vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
			vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
			vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
			vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
		}
	}

	vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);

	if (nested_cpu_has_vid(vmcs12))
		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
		vmcs12->guest_ia32_efer = vcpu->arch.efer;
}

/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have changed by the L2 guest or the exit - i.e., the guest-state and
 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
 * which already writes to vmcs12 directly.
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 vm_exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update exit information fields: */
	vmcs12->vm_exit_reason = vm_exit_reason;
	if (to_vmx(vcpu)->exit_reason.enclave_mode)
		vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
	vmcs12->exit_qualification = exit_qualification;
	vmcs12->vm_exit_intr_info = exit_intr_info;

	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		vmcs12->launch_state = 1;

		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);

		/*
		 * According to spec, there's no need to store the guest's
		 * MSRs if the exit is due to a VM-entry failure that occurs
		 * during or after loading the guest state. Since this exit
		 * does not fall in that category, we need to save the MSRs.
		 */
		if (nested_vmx_store_msr(vcpu,
					 vmcs12->vm_exit_msr_store_addr,
					 vmcs12->vm_exit_msr_store_count))
			nested_vmx_abort(vcpu,
					 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}

/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12)
{
	enum vm_entry_failure_code ignored;
	struct kvm_segment seg;

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->host_ia32_efer;
	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	vmx_set_efer(vcpu, vcpu->arch.efer);

	kvm_rsp_write(vcpu, vmcs12->host_rsp);
	kvm_rip_write(vcpu, vmcs12->host_rip);
	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
	vmx_set_interrupt_shadow(vcpu, 0);

	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because vmx_set_cr0 refers to efer set above.
	 *
	 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it);
	 */
	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
	vmx_set_cr0(vcpu, vmcs12->host_cr0);

	/* Same as above - no reason to call set_cr4_guest_host_mask().  */
	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	vmx_set_cr4(vcpu, vmcs12->host_cr4);

	nested_ept_uninit_mmu_context(vcpu);

	/*
	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
	 * couldn't have changed.
	 */
	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);

	nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);

	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);

	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
		vmcs_write64(GUEST_BNDCFGS, 0);

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
		vcpu->arch.pat = vmcs12->host_ia32_pat;
	}
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
					 vmcs12->host_ia32_perf_global_ctrl));

	/* Set L1 segment info according to Intel SDM
	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.selector = vmcs12->host_cs_selector,
		.type = 11,
		.present = 1,
		.s = 1,
		.g = 1
	};
	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		seg.l = 1;
	else
		seg.db = 1;
	__vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.type = 3,
		.present = 1,
		.s = 1,
		.db = 1,
		.g = 1
	};
	seg.selector = vmcs12->host_ds_selector;
	__vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
	seg.selector = vmcs12->host_es_selector;
	__vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
	seg.selector = vmcs12->host_ss_selector;
	__vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
	seg.selector = vmcs12->host_fs_selector;
	seg.base = vmcs12->host_fs_base;
	__vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
	seg.selector = vmcs12->host_gs_selector;
	seg.base = vmcs12->host_gs_base;
	__vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
	seg = (struct kvm_segment) {
		.base = vmcs12->host_tr_base,
		.limit = 0x67,
		.selector = vmcs12->host_tr_selector,
		.type = 11,
		.present = 1
	};
	__vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);

	memset(&seg, 0, sizeof(seg));
	seg.unusable = 1;
	__vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);

	kvm_set_dr(vcpu, 7, 0x400);
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
				vmcs12->vm_exit_msr_load_count))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}

static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
{
	struct vmx_uret_msr *efer_msr;
	unsigned int i;

	if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
		return vmcs_read64(GUEST_IA32_EFER);

	if (cpu_has_load_ia32_efer())
		return host_efer;

	for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
		if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
			return vmx->msr_autoload.guest.val[i].value;
	}

	efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
	if (efer_msr)
		return efer_msr->data;

	return host_efer;
}
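
/*
 * Summary of the fallback chain above: if vmcs01 loads EFER via the VMCS
 * entry control, read GUEST_IA32_EFER directly; otherwise the guest is
 * presumably running with the host's EFER unless a guest value appears in
 * the MSR autoload list or in the userspace-return (uret) MSR slot, hence
 * the host_efer fallbacks.
 */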

static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msr_entry g, h;
	gpa_t gpa;
	u32 i, j;

	vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
		/*
		 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
		 * as vmcs01.GUEST_DR7 contains a userspace defined value
		 * and vcpu->arch.dr7 is not squirreled away before the
		 * nested VMENTER (not worth adding a variable in nested_vmx).
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			kvm_set_dr(vcpu, 7, DR7_FIXED_1);
		else
			WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
	}

	/*
	 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
	 * handle a variety of side effects to KVM's software model.
	 */
	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));

	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
	vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));

	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));

	nested_ept_uninit_mmu_context(vcpu);
	vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	/*
	 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
	 * from vmcs01 (if necessary). The PDPTRs are not loaded on
	 * VMFail, like everything else we just need to ensure our
	 * software model is up-to-date.
	 */
	if (enable_ept && is_pae_paging(vcpu))
		ept_save_pdptrs(vcpu);

	kvm_mmu_reset_context(vcpu);

	/*
	 * This nasty bit of open coding is a compromise between blindly
	 * loading L1's MSRs using the exit load lists (incorrect emulation
	 * of VMFail), leaving the nested VM's MSRs in the software model
	 * (incorrect behavior) and snapshotting the modified MSRs (too
	 * expensive since the lists are unbound by hardware).  For each
	 * MSR that was (prematurely) loaded from the nested VMEntry load
	 * list, reload it from the exit load list if it exists and differs
	 * from the guest value.  The intent is to stuff host state as
	 * silently as possible, not to fully process the exit load list.
	 */
	for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
		gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
		if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
			pr_debug_ratelimited(
				"%s read MSR index failed (%u, 0x%08llx)\n",
				__func__, i, gpa);
			goto vmabort;
		}

		for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
			gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
			if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
				pr_debug_ratelimited(
					"%s read MSR failed (%u, 0x%08llx)\n",
					__func__, j, gpa);
				goto vmabort;
			}
			if (h.index != g.index)
				continue;
			if (h.value == g.value)
				break;

			if (nested_vmx_load_msr_check(vcpu, &h)) {
				pr_debug_ratelimited(
					"%s check failed (%u, 0x%x, 0x%x)\n",
					__func__, j, h.index, h.reserved);
				goto vmabort;
			}

			if (kvm_set_msr(vcpu, h.index, h.value)) {
				pr_debug_ratelimited(
					"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
					__func__, j, h.index, h.value);
				goto vmabort;
			}
			break;
		}
	}

	return;

vmabort:
	nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}

/*
 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
 */
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* trying to cancel vmlaunch/vmresume is a bug */
	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	/* Similarly, triple faults in L2 should never escape. */
	WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));

	if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
		/*
		 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
		 * Enlightened VMCS after migration and we still need to
		 * do that when something is forcing L2->L1 exit prior to
		 * the first L2 run.
		 */
		(void)nested_get_evmcs_page(vcpu);
	}

	/* Service the TLB flush request for L2 before switching to L1. */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_vcpu_flush_tlb_current(vcpu);

	/*
	 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
	 * now and the new vmentry.  Ensure that the VMCS02 PDPTR fields are
	 * up-to-date before switching to L1.
	 */
	if (enable_ept && is_pae_paging(vcpu))
		vmx_ept_load_pdptrs(vcpu);

	leave_guest_mode(vcpu);

	if (nested_cpu_has_preemption_timer(vmcs12))
		hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);

	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
		vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
		if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
			vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
	}

	if (likely(!vmx->fail)) {
		sync_vmcs02_to_vmcs12(vcpu, vmcs12);

		if (vm_exit_reason != -1)
			prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
				       exit_intr_info, exit_qualification);

		/*
		 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
		 * also be used to capture vmcs12 cache as part of
		 * capturing nVMX state for snapshot (migration).
		 *
		 * Otherwise, this flush will dirty guest memory at a
		 * point it is already assumed by user-space to be
		 * immutable.
		 */
		nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
	} else {
		/*
		 * The only expected VM-instruction error is "VM entry with
		 * invalid control field(s)." Anything else indicates a
		 * problem with L0.  And we should never get here with a
		 * VMFail of any type if early consistency checks are enabled.
		 */
		WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
			     VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		WARN_ON_ONCE(nested_early_check);
	}

	vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	/* Update any VMCS fields that might have changed while L2 ran */
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
	if (kvm_has_tsc_control)
		vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);

	if (vmx->nested.l1_tpr_threshold != -1)
		vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);

	if (vmx->nested.change_vmcs01_virtual_apic_mode) {
		vmx->nested.change_vmcs01_virtual_apic_mode = false;
		vmx_set_virtual_apic_mode(vcpu);
	}

	if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
		vmx->nested.update_vmcs01_cpu_dirty_logging = false;
		vmx_update_cpu_dirty_logging(vcpu);
	}

	/* Unpin physical memory we referred to in vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	if (vmx->nested.reload_vmcs01_apic_access_page) {
		vmx->nested.reload_vmcs01_apic_access_page = false;
		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
	}

	if ((vm_exit_reason != -1) &&
	    (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
		vmx->nested.need_vmcs12_to_shadow_sync = true;

	/* in case we halted in L2 */
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	if (likely(!vmx->fail)) {
		if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
		    nested_exit_intr_ack_set(vcpu)) {
			int irq = kvm_cpu_get_interrupt(vcpu);
			WARN_ON(irq < 0);
			vmcs12->vm_exit_intr_info = irq |
				INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
		}

		if (vm_exit_reason != -1)
			trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
						       vmcs12->exit_qualification,
						       vmcs12->idt_vectoring_info_field,
						       vmcs12->vm_exit_intr_info,
						       vmcs12->vm_exit_intr_error_code,
						       KVM_ISA_VMX);

		load_vmcs12_host_state(vcpu, vmcs12);

		return;
	}

	/*
	 * After an early L2 VM-entry failure, we're now back
	 * in L1 which thinks it just finished a VMLAUNCH or
	 * VMRESUME instruction, so we need to set the failure
	 * flag and the VM-instruction error field of the VMCS
	 * accordingly, and skip the emulated instruction.
	 */
	(void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	/*
	 * Restore L1's host state to KVM's software model.  We're here
	 * because a consistency check was caught by hardware, which
	 * means some amount of guest state has been propagated to KVM's
	 * model and needs to be unwound to the host's state.
	 */
	nested_vmx_restore_host_state(vcpu);

	vmx->fail = 0;
}

static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
{
	nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
}

/*
 * Decode the memory-address operand of a vmx instruction, as recorded on an
 * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and throws
 * #UD, #GP, or #SS.
 */
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
{
	gva_t off;
	bool exn;
	struct kvm_segment s;

	/*
	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
	 * Execution", on an exit, vmx_instruction_info holds most of the
	 * addressing components of the operand. Only the displacement part
	 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
	 * For how an actual address is calculated from all these components,
	 * refer to Vol. 1, "Operand Addressing".
	 */
	int  scaling = vmx_instruction_info & 3;
	int  addr_size = (vmx_instruction_info >> 7) & 7;
	bool is_reg = vmx_instruction_info & (1u << 10);
	int  seg_reg = (vmx_instruction_info >> 15) & 7;
	int  index_reg = (vmx_instruction_info >> 18) & 0xf;
	bool index_is_valid = !(vmx_instruction_info & (1u << 22));
	int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
	bool base_is_valid  = !(vmx_instruction_info & (1u << 27));
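
	/*
	 * Layout of the VMX instruction-information field decoded above,
	 * per the SDM's "VM-Exit Instruction-Information Field" table:
	 *   1:0   scaling (scale factor 2^n)
	 *   9:7   address size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit)
	 *   10    Mem/Reg (1 = register operand, invalid for these insns)
	 *   17:15 segment register
	 *   21:18 index register; bit 22 = index register invalid
	 *   26:23 base register;  bit 27 = base register invalid
	 *   31:28 reg2 (the register operand, e.g. for VMREAD/VMWRITE)
	 */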

	if (is_reg) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* Addr = segment_base + offset */
	/* offset = base + [index * scale] + displacement */
	off = exit_qualification; /* holds the displacement */
	if (addr_size == 1)
		off = (gva_t)sign_extend64(off, 31);
	else if (addr_size == 0)
		off = (gva_t)sign_extend64(off, 15);
	if (base_is_valid)
		off += kvm_register_read(vcpu, base_reg);
	if (index_is_valid)
		off += kvm_register_read(vcpu, index_reg) << scaling;
	vmx_get_segment(vcpu, &s, seg_reg);

	/*
	 * The effective address, i.e. @off, of a memory operand is truncated
	 * based on the address size of the instruction.  Note that this is
	 * the *effective address*, i.e. the address prior to accounting for
	 * the segment's base.
	 */
	if (addr_size == 1) /* 32 bit */
		off &= 0xffffffff;
	else if (addr_size == 0) /* 16 bit */
		off &= 0xffff;

	/* Checks for #GP/#SS exceptions. */
	exn = false;
	if (is_long_mode(vcpu)) {
		/*
		 * The virtual/linear address is never truncated in 64-bit
		 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
		 * address when using FS/GS with a non-zero base.
		 */
		if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
			*ret = s.base + off;
		else
			*ret = off;

		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
		 * non-canonical form. This is the only check on the memory
		 * destination for long mode!
		 */
		exn = is_noncanonical_address(*ret, vcpu);
	} else {
		/*
		 * When not in long mode, the virtual/linear address is
		 * unconditionally truncated to 32 bits regardless of the
		 * address size.
		 */
		*ret = (s.base + off) & 0xffffffff;

		/* Protected mode: apply checks for segment validity in the
		 * following order:
		 * - segment type check (#GP(0) may be thrown)
		 * - usability check (#GP(0)/#SS(0))
		 * - limit check (#GP(0)/#SS(0))
		 */
		if (wr)
			/* #GP(0) if the destination operand is located in a
			 * read-only data segment or any code segment.
			 */
			exn = ((s.type & 0xa) == 0 || (s.type & 8));
		else
			/* #GP(0) if the source operand is located in an
			 * execute-only code segment
			 */
			exn = ((s.type & 0xa) == 8);
		if (exn) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
		 */
		exn = (s.unusable != 0);

		/*
		 * Protected mode: #GP(0)/#SS(0) if the memory operand is
		 * outside the segment limit.  All CPUs that support VMX ignore
		 * limit checks for flat segments, i.e. segments with base==0,
		 * limit==0xffffffff and of type expand-up data or code.
		 */
		if (!(s.base == 0 && s.limit == 0xffffffff &&
		     ((s.type & 8) || !(s.type & 4))))
			exn = exn || ((u64)off + len - 1 > s.limit);
	}
	if (exn) {
		kvm_queue_exception_e(vcpu,
				      seg_reg == VCPU_SREG_SS ?
						SS_VECTOR : GP_VECTOR,
				      0);
		return 1;
	}

	return 0;
}

void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx;

	if (!nested_vmx_allowed(vcpu))
		return;

	vmx = to_vmx(vcpu);
	if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
		vmx->nested.msrs.entry_ctls_high |=
				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		vmx->nested.msrs.exit_ctls_high |=
				VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
	} else {
		vmx->nested.msrs.entry_ctls_high &=
				~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		vmx->nested.msrs.exit_ctls_high &=
				~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
	}
}

static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
				int *ret)
{
	gva_t gva;
	struct x86_exception e;
	int r;

	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
				vmcs_read32(VMX_INSTRUCTION_INFO), false,
				sizeof(*vmpointer), &gva)) {
		*ret = 1;
		return -EINVAL;
	}

	r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
	if (r != X86EMUL_CONTINUE) {
		*ret = kvm_handle_memory_failure(vcpu, r, &e);
		return -EINVAL;
	}

	return 0;
}

/*
 * Allocate a shadow VMCS and associate it with the currently loaded
 * VMCS, unless such a shadow VMCS already exists. The newly allocated
 * VMCS is also VMCLEARed, so that it is ready for use.
 */
static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;

	/*
	 * We should allocate a shadow vmcs for vmcs01 only when L1
	 * executes VMXON and free it when L1 executes VMXOFF.
	 * As it is invalid to execute VMXON twice, we shouldn't reach
	 * here when vmcs01 already have an allocated shadow vmcs.
	 */
	WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);

	if (!loaded_vmcs->shadow_vmcs) {
		loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
		if (loaded_vmcs->shadow_vmcs)
			vmcs_clear(loaded_vmcs->shadow_vmcs);
	}
	return loaded_vmcs->shadow_vmcs;
}

static int enter_vmx_operation(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int r;

	r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
	if (r < 0)
		goto out_vmcs02;

	vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
	if (!vmx->nested.cached_vmcs12)
		goto out_cached_vmcs12;

	vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
	if (!vmx->nested.cached_shadow_vmcs12)
		goto out_cached_shadow_vmcs12;

	if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
		goto out_shadow_vmcs;

	hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;

	vmx->nested.vpid02 = allocate_vpid();

	vmx->nested.vmcs02_initialized = false;
	vmx->nested.vmxon = true;

	if (vmx_pt_mode_is_host_guest()) {
		vmx->pt_desc.guest.ctl = 0;
		pt_update_intercept_for_msr(vcpu);
	}

	return 0;

out_shadow_vmcs:
	kfree(vmx->nested.cached_shadow_vmcs12);

out_cached_shadow_vmcs12:
	kfree(vmx->nested.cached_vmcs12);

out_cached_vmcs12:
	free_loaded_vmcs(&vmx->nested.vmcs02);

out_vmcs02:
	return -ENOMEM;
}

/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not save or even
 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
 * do not currently need to store anything in that guest-allocated memory
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
 * argument is different from the VMXON pointer (which the spec says they do).
 */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
	int ret;
	gpa_t vmptr;
	uint32_t revision;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
		| FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;

	/*
	 * The Intel VMX Instruction Reference lists a bunch of bits that are
	 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
	 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this).
	 * Otherwise, we should fail with #UD.  But most faulting conditions
	 * have already been checked by hardware, prior to the VM-exit for
	 * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
	 * that bit set to 1 in non-root mode.
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* CPL=0 must be checked manually. */
	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (vmx->nested.vmxon)
		return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);

	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
			!= VMXON_NEEDED_FEATURES) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
		return ret;

	/*
	 * SDM 3: 24.11.5
	 * The first 4 bytes of VMXON region contain the supported
	 * VMCS revision identifier
	 *
	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
	 * which replaces physical address width with 32
	 */
	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_failInvalid(vcpu);

	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
	    revision != VMCS12_REVISION)
		return nested_vmx_failInvalid(vcpu);

	vmx->nested.vmxon_ptr = vmptr;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	return nested_vmx_succeed(vcpu);
}

static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->nested.current_vmptr == -1ull)
		return;

	copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));

	if (enable_shadow_vmcs) {
		/* copy to memory all shadowed fields in case
		   they were modified */
		copy_shadow_to_vmcs12(vmx);
		vmx_disable_shadow_vmcs(vmx);
	}
	vmx->nested.posted_intr_nv = -1;

	/* Flush VMCS12 to guest memory */
	kvm_vcpu_write_guest_page(vcpu,
				  vmx->nested.current_vmptr >> PAGE_SHIFT,
				  vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	vmx->nested.current_vmptr = -1ull;
}

/* Emulate the VMXOFF instruction */
static int handle_vmoff(struct kvm_vcpu *vcpu)
{
	if (!nested_vmx_check_permission(vcpu))
		return 1;

	free_nested(vcpu);

	/* Process a latched INIT during time CPU was in VMX operation */
	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return nested_vmx_succeed(vcpu);
}

/* Emulate the VMCLEAR instruction */
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 zero = 0;
	gpa_t vmptr;
	u64 evmcs_gpa;
	int r;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
		return r;

	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);

	if (vmptr == vmx->nested.vmxon_ptr)
		return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);

	/*
	 * When Enlightened VMEntry is enabled on the calling CPU we treat
	 * memory area pointer by vmptr as Enlightened VMCS (as there's no good
	 * way to distinguish it from VMCS12) and we must not corrupt it by
	 * writing to the non-existent 'launch_state' field. The area doesn't
	 * have to be the currently active EVMCS on the calling CPU and there's
	 * nothing KVM has to do to transition it from 'active' to 'non-active'
	 * state. It is possible that the area will stay mapped as
	 * vmx->nested.hv_evmcs but this shouldn't be a problem.
	 */
	if (likely(!vmx->nested.enlightened_vmcs_enabled ||
		   !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
		if (vmptr == vmx->nested.current_vmptr)
			nested_release_vmcs12(vcpu);

		kvm_vcpu_write_guest(vcpu,
				     vmptr + offsetof(struct vmcs12,
						      launch_state),
				     &zero, sizeof(zero));
	} else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
		nested_release_evmcs(vcpu);
	}

	return nested_vmx_succeed(vcpu);
}

/* Emulate the VMLAUNCH instruction */
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, true);
}

/* Emulate the VMRESUME instruction */
static int handle_vmresume(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, false);
}

static int handle_vmread(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
						    : get_vmcs12(vcpu);
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct x86_exception e;
	unsigned long field;
	u64 value;
	gva_t gva = 0;
	short offset;
	int len, r;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	/*
	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
	 * any VMREAD sets the ALU flags for VMfailInvalid.
	 */
	if (vmx->nested.current_vmptr == -1ull ||
	    (is_guest_mode(vcpu) &&
	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
		return nested_vmx_failInvalid(vcpu);

	/* Decode instruction info and find the field to read */
	field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));

	offset = vmcs_field_to_offset(field);
	if (offset < 0)
		return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);

	if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	/* Read the field, zero-extended to a u64 value */
	value = vmcs12_read_any(vmcs12, field, offset);

	/*
	 * Now copy part of this value to register or memory, as requested.
	 * Note that the number of bits actually copied is 32 or 64 depending
	 * on the guest's mode (32 or 64 bit), not on the given field's length.
	 */
	if (instr_info & BIT(10)) {
		kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
	} else {
		len = is_64_bit_mode(vcpu) ? 8 : 4;
		if (get_vmx_mem_address(vcpu, exit_qualification,
					instr_info, true, len, &gva))
			return 1;
		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
		r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
		if (r != X86EMUL_CONTINUE)
			return kvm_handle_memory_failure(vcpu, r, &e);
	}

	return nested_vmx_succeed(vcpu);
}

static bool is_shadow_field_rw(unsigned long field)
{
	switch (field) {
#define SHADOW_FIELD_RW(x, y) case x:
#include "vmcs_shadow_fields.h"
		return true;
	default:
		break;
	}
	return false;
}

static bool is_shadow_field_ro(unsigned long field)
{
	switch (field) {
#define SHADOW_FIELD_RO(x, y) case x:
#include "vmcs_shadow_fields.h"
		return true;
	default:
		break;
	}
	return false;
}
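
/*
 * Note: the two helpers above use vmcs_shadow_fields.h as an X-macro
 * table. Defining SHADOW_FIELD_RW/SHADOW_FIELD_RO as "case x:" before
 * including the header expands the shadow-field list into case labels,
 * so the field lists live in exactly one place (the same header also
 * populates the shadow_read_only/read_write_fields arrays).
 */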

static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
						    : get_vmcs12(vcpu);
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct x86_exception e;
	unsigned long field;
	short offset;
	gva_t gva;
	int len, r;

	/*
	 * The value to write might be 32 or 64 bits, depending on L1's long
	 * mode, and eventually we need to write that into a field of several
	 * possible lengths. The code below first zero-extends the value to 64
	 * bit (value), and then copies only the appropriate number of
	 * bits into the vmcs12 field.
	 */
	u64 value = 0;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	/*
	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
	 * any VMWRITE sets the ALU flags for VMfailInvalid.
	 */
	if (vmx->nested.current_vmptr == -1ull ||
	    (is_guest_mode(vcpu) &&
	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
		return nested_vmx_failInvalid(vcpu);

	if (instr_info & BIT(10))
		value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
	else {
		len = is_64_bit_mode(vcpu) ? 8 : 4;
		if (get_vmx_mem_address(vcpu, exit_qualification,
					instr_info, false, len, &gva))
			return 1;
		r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
		if (r != X86EMUL_CONTINUE)
			return kvm_handle_memory_failure(vcpu, r, &e);
	}

	field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));

	offset = vmcs_field_to_offset(field);
	if (offset < 0)
		return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);

	/*
	 * If the vCPU supports "VMWRITE to any supported field in the
	 * VMCS," then the "read-only" fields are actually read/write.
	 */
	if (vmcs_field_readonly(field) &&
	    !nested_cpu_has_vmwrite_any_field(vcpu))
		return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);

	/*
	 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
	 * vmcs12, else we may crush a field or consume a stale value.
	 */
	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	/*
	 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
	 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
	 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
	 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
	 * from L1 will return a different value than VMREAD from L2 (L1 sees
	 * the stripped down value, L2 sees the full value as stored by KVM).
	 */
	if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
		value &= 0x1f0ff;

	vmcs12_write_any(vmcs12, field, offset, value);

	/*
	 * Do not track vmcs12 dirty-state if in guest-mode as we actually
	 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
	 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
	 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
	 */
	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
		/*
		 * L1 can read these fields without exiting, ensure the
		 * shadow VMCS is up-to-date.
		 */
		if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
			preempt_disable();
			vmcs_load(vmx->vmcs01.shadow_vmcs);

			__vmcs_writel(field, value);

			vmcs_clear(vmx->vmcs01.shadow_vmcs);
			vmcs_load(vmx->loaded_vmcs->vmcs);
			preempt_enable();
		}
		vmx->nested.dirty_vmcs12 = true;
	}

	return nested_vmx_succeed(vcpu);
}

static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	vmx->nested.current_vmptr = vmptr;
	if (enable_shadow_vmcs) {
		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
		vmcs_write64(VMCS_LINK_POINTER,
			     __pa(vmx->vmcs01.shadow_vmcs));
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	}
	vmx->nested.dirty_vmcs12 = true;
}

/* Emulate the VMPTRLD instruction */
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t vmptr;
	int r;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
		return r;

	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);

	if (vmptr == vmx->nested.vmxon_ptr)
		return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);

	/* Forbid normal VMPTRLD if Enlightened version was used */
	if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		return 1;

	if (vmx->nested.current_vmptr != vmptr) {
		struct kvm_host_map map;
		struct vmcs12 *new_vmcs12;

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
			/*
			 * Reads from an unbacked page return all 1s,
			 * which means that the 32 bits located at the
			 * given physical address won't match the required
			 * VMCS12_REVISION identifier.
			 */
			return nested_vmx_fail(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
		}

		new_vmcs12 = map.hva;

		if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    (new_vmcs12->hdr.shadow_vmcs &&
		     !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
			kvm_vcpu_unmap(vcpu, &map, false);
			return nested_vmx_fail(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
		}

		nested_release_vmcs12(vcpu);

		/*
		 * Load VMCS12 from guest memory since it is not already
		 * cached.
		 */
		memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
		kvm_vcpu_unmap(vcpu, &map, false);

		set_current_vmptr(vmx, vmptr);
	}

	return nested_vmx_succeed(vcpu);
}

/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qual = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
	struct x86_exception e;
	gva_t gva;
	int r;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
		return 1;

	if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
				true, sizeof(gpa_t), &gva))
		return 1;
	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
	r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
					sizeof(gpa_t), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	return nested_vmx_succeed(vcpu);
}

/* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info, types;
	unsigned long type, roots_to_free;
	struct kvm_mmu *mmu;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 eptp, gpa;
	} operand;
	int i, r;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_EPT) ||
	    !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;

	if (type >= 32 || !(types & (1 << type)))
		return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	/* According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
			vmx_instruction_info, false, sizeof(operand), &gva))
		return 1;
	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	/*
	 * Nested EPT roots are always held through guest_mmu,
	 * not root_mmu.
	 */
	mmu = &vcpu->arch.guest_mmu;

	switch (type) {
	case VMX_EPT_EXTENT_CONTEXT:
		if (!nested_vmx_check_eptp(vcpu, operand.eptp))
			return nested_vmx_fail(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

		roots_to_free = 0;
		if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd,
					    operand.eptp))
			roots_to_free |= KVM_MMU_ROOT_CURRENT;

		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
			if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
						    mmu->prev_roots[i].pgd,
						    operand.eptp))
				roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
		}
		break;
	case VMX_EPT_EXTENT_GLOBAL:
		roots_to_free = KVM_MMU_ROOTS_ALL;
		break;
	default:
		BUG();
		break;
	}

	if (roots_to_free)
		kvm_mmu_free_roots(vcpu, mmu, roots_to_free);

	return nested_vmx_succeed(vcpu);
}
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info;
	unsigned long type, types;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 vpid;
		u64 gla;
	} operand;
	u16 vpid02;
	int r;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_VPID) ||
	    !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.vpid_caps &
			VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;

	if (type >= 32 || !(types & (1 << type)))
		return nested_vmx_fail(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	/* according to the intel vmx instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
			vmx_instruction_info, false, sizeof(operand), &gva))
		return 1;
	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	if (operand.vpid >> 16)
		return nested_vmx_fail(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	vpid02 = nested_get_vpid02(vcpu);
	switch (type) {
	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
		if (!operand.vpid ||
		    is_noncanonical_address(operand.gla, vcpu))
			return nested_vmx_fail(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		vpid_sync_vcpu_addr(vpid02, operand.gla);
		break;
	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
		if (!operand.vpid)
			return nested_vmx_fail(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		vpid_sync_context(vpid02);
		break;
	case VMX_VPID_EXTENT_ALL_CONTEXT:
		vpid_sync_context(vpid02);
		break;
	default:
		WARN_ON_ONCE(1);
		return kvm_skip_emulated_instruction(vcpu);
	}

	/*
	 * Sync the shadow page tables if EPT is disabled, L1 is invalidating
	 * linear mappings for L2 (tagged with L2's VPID).  Free all guest
	 * roots as VPIDs are not tracked in the MMU role.
	 *
	 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
	 * an MMU when EPT is disabled.
	 *
	 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
	 */
	if (!enable_ept)
		kvm_mmu_free_guest_mode_roots(vcpu, &vcpu->arch.root_mmu);

	return nested_vmx_succeed(vcpu);
}
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	u32 index = kvm_rcx_read(vcpu);
	u64 new_eptp;

	if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
		return 1;
	if (index >= VMFUNC_EPTP_ENTRIES)
		return 1;

	if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
				     &new_eptp, index * 8, 8))
		return 1;

	/*
	 * If the (L2) guest does a vmfunc to the currently
	 * active ept pointer, we don't have to do anything else
	 */
	if (vmcs12->ept_pointer != new_eptp) {
		if (!nested_vmx_check_eptp(vcpu, new_eptp))
			return 1;

		vmcs12->ept_pointer = new_eptp;
		nested_ept_new_eptp(vcpu);

		if (!nested_cpu_has_vpid(vmcs12))
			kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
	}

	return 0;
}
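/*
 * Worked example for the read above (assuming the usual definition of
 * VMFUNC_EPTP_ENTRIES as 512): the EPTP list is a single guest page of
 * 512 u64 entries at vmcs12->eptp_list_address, so index 5 fetches the
 * 8 bytes at byte offset 5 * 8 = 40 of that page.
 */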
static int handle_vmfunc(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	u32 function = kvm_rax_read(vcpu);

	/*
	 * VMFUNC is only supported for nested guests, but we always enable the
	 * secondary control for simplicity; for non-nested mode, fake that we
	 * didn't by injecting #UD.
	 */
	if (!is_guest_mode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcs12 = get_vmcs12(vcpu);

	/*
	 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
	 * is enabled in vmcs02 if and only if it's enabled in vmcs12.
	 */
	if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!(vmcs12->vm_function_control & BIT_ULL(function)))
		goto fail;

	switch (function) {
	case 0:
		if (nested_vmx_eptp_switching(vcpu, vmcs12))
			goto fail;
		break;
	default:
		goto fail;
	}

	return kvm_skip_emulated_instruction(vcpu);

fail:
	/*
	 * This is effectively a reflected VM-Exit, as opposed to a synthesized
	 * nested VM-Exit.  Pass the original exit reason, i.e. don't hardcode
	 * EXIT_REASON_VMFUNC as the exit reason.
	 */
	nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
			  vmx_get_intr_info(vcpu),
			  vmx_get_exit_qual(vcpu));
	return 1;
}
/*
 * Return true if an IO instruction with the specified port and size should cause
 * a VM-exit into L1.
 */
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	gpa_t bitmap, last_bitmap;
	u8 b;

	last_bitmap = (gpa_t)-1;
	b = -1;

	while (size > 0) {
		if (port < 0x8000)
			bitmap = vmcs12->io_bitmap_a;
		else if (port < 0x10000)
			bitmap = vmcs12->io_bitmap_b;
		else
			return true;
		bitmap += (port & 0x7fff) / 8;

		if (last_bitmap != bitmap)
			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
				return true;
		if (b & (1 << (port & 7)))
			return true;

		port++;
		size--;
		last_bitmap = bitmap;
	}

	return false;
}

static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification;
	unsigned short port;
	int size;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

	exit_qualification = vmx_get_exit_qual(vcpu);

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;

	return nested_vmx_check_io_bitmaps(vcpu, port, size);
}
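/*
 * Worked example for the bitmap lookup above: a one-byte OUT to port
 * 0x3f8 tests bit 0 of byte 0x7f (0x3f8 / 8) of io_bitmap_a, while a
 * one-byte access to port 0x8004 tests bit 4 of byte 0 of io_bitmap_b;
 * the two bitmaps cover ports 0x0000-0x7fff and 0x8000-0xffff
 * respectively.
 */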
/*
 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
 * disinterest in the current event (read or write a specific MSR) by using an
 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
 */
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12,
					union vmx_exit_reason exit_reason)
{
	u32 msr_index = kvm_rcx_read(vcpu);
	gpa_t bitmap;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return true;

	/*
	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
	 * for the four combinations of read/write and low/high MSR numbers.
	 * First we need to figure out which of the four to use:
	 */
	bitmap = vmcs12->msr_bitmap;
	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
		bitmap += 2048;
	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
		bitmap += 1024;
	}

	/* Then read the msr_index'th bit from this bitmap: */
	if (msr_index < 1024*8) {
		unsigned char b;
		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
			return true;
		return 1 & (b >> (msr_index & 7));
	} else
		return true; /* let L1 handle the wrong parameter */
}
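/*
 * Worked example: a WRMSR to MSR_LSTAR (0xc0000082) lands in the
 * write-high quarter of the page at offset 2048 + 1024 = 3072, and with
 * msr_index adjusted to 0x82 the check tests bit 2 of byte 0x10 of that
 * quarter.
 */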
/*
 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
 * intercept (via guest_host_mask etc.) the current event.
 */
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	int cr = exit_qualification & 15;
	int reg;
	unsigned long val;

	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		reg = (exit_qualification >> 8) & 15;
		val = kvm_register_read(vcpu, reg);
		switch (cr) {
		case 0:
			if (vmcs12->cr0_guest_host_mask &
			    (val ^ vmcs12->cr0_read_shadow))
				return true;
			break;
		case 3:
			if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
				return true;
			break;
		case 4:
			if (vmcs12->cr4_guest_host_mask &
			    (vmcs12->cr4_read_shadow ^ val))
				return true;
			break;
		case 8:
			if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
				return true;
			break;
		}
		break;
	case 2: /* clts */
		if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
		    (vmcs12->cr0_read_shadow & X86_CR0_TS))
			return true;
		break;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR3_STORE_EXITING)
				return true;
			break;
		case 8:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR8_STORE_EXITING)
				return true;
			break;
		}
		break;
	case 3: /* lmsw */
		/*
		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
		 * cr0. Other attempted changes are ignored, with no exit.
		 */
		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		if (vmcs12->cr0_guest_host_mask & 0xe &
		    (val ^ vmcs12->cr0_read_shadow))
			return true;
		if ((vmcs12->cr0_guest_host_mask & 0x1) &&
		    !(vmcs12->cr0_read_shadow & 0x1) &&
		    (val & 0x1))
			return true;
		break;
	}
	return false;
}
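/*
 * lmsw example: with cr0_guest_host_mask = 0xf and cr0_read_shadow = 0,
 * "lmsw 0x1" (L2 setting CR0.PE) matches the second lmsw test above and
 * is reflected to L1, whereas "lmsw 0x0" changes nothing L1 cares about
 * and stays in L0.
 */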
static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	u32 encls_leaf;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
	    !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
		return false;

	encls_leaf = kvm_rax_read(vcpu);
	if (encls_leaf > 62)
		encls_leaf = 63;
	return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
}
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12, gpa_t bitmap)
{
	u32 vmx_instruction_info;
	unsigned long field;
	u8 b;

	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return true;

	/* Decode instruction info and find the field to access */
	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));

	/* Out-of-range fields always cause a VM exit from L2 to L1 */
	if (field >> 15)
		return true;

	if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
		return true;

	return 1 & (b >> (field & 7));
}
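/*
 * Worked example: a VMREAD of GUEST_RIP (encoding 0x681e) consults bit 6
 * of byte 0xd03 (0x681e / 8) of the vmread bitmap; any encoding with
 * bit 15 set is out of range and always reflects to L1.
 */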
static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
{
	u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;

	if (nested_cpu_has_mtf(vmcs12))
		return true;

	/*
	 * An MTF VM-exit may be injected into the guest by setting the
	 * interruption-type to 7 (other event) and the vector field to 0. Such
	 * is the case regardless of the 'monitor trap flag' VM-execution
	 * control.
	 */
	return entry_intr_info == (INTR_INFO_VALID_MASK
				   | INTR_TYPE_OTHER_EVENT);
}
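/*
 * Concretely, the injected-MTF check above matches entry_intr_info ==
 * 0x80000700: INTR_INFO_VALID_MASK (bit 31) plus interruption-type 7
 * (INTR_TYPE_OTHER_EVENT, bits 10:8) and vector 0.
 */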
/*
 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
 * L1 wants the exit.  Only call this when in is_guest_mode (L2).
 */
static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
				     union vmx_exit_reason exit_reason)
{
	u32 intr_info;

	switch ((u16)exit_reason.basic) {
	case EXIT_REASON_EXCEPTION_NMI:
		intr_info = vmx_get_intr_info(vcpu);
		if (is_nmi(intr_info))
			return true;
		else if (is_page_fault(intr_info))
			return vcpu->arch.apf.host_apf_flags ||
			       vmx_need_pf_intercept(vcpu);
		else if (is_debug(intr_info) &&
			 vcpu->guest_debug &
			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			return true;
		else if (is_breakpoint(intr_info) &&
			 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return true;
		else if (is_alignment_check(intr_info) &&
			 !vmx_guest_inject_ac(vcpu))
			return true;
		return false;
	case EXIT_REASON_EXTERNAL_INTERRUPT:
		return true;
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return true;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault()
		 */
		return true;
	case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never uses directly L1's EPT, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table is L0's fault.
		 */
		return true;
	case EXIT_REASON_PREEMPTION_TIMER:
		return true;
	case EXIT_REASON_PML_FULL:
		/*
		 * PML is emulated for an L1 VMM and should never be enabled in
		 * vmcs02, always "handle" PML_FULL by exiting to userspace.
		 */
		return true;
	case EXIT_REASON_VMFUNC:
		/* VM functions are emulated through L2->L0 vmexits. */
		return true;
	default:
		return false;
	}
}
/*
 * Return 1 if L1 wants to intercept an exit from L2.  Only call this when in
 * is_guest_mode (L2).
 */
static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
				     union vmx_exit_reason exit_reason)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 intr_info;

	switch ((u16)exit_reason.basic) {
	case EXIT_REASON_EXCEPTION_NMI:
		intr_info = vmx_get_intr_info(vcpu);
		if (is_nmi(intr_info))
			return true;
		else if (is_page_fault(intr_info))
			return true;
		return vmcs12->exception_bitmap &
				(1u << (intr_info & INTR_INFO_VECTOR_MASK));
	case EXIT_REASON_EXTERNAL_INTERRUPT:
		return nested_exit_on_intr(vcpu);
	case EXIT_REASON_TRIPLE_FAULT:
		return true;
	case EXIT_REASON_INTERRUPT_WINDOW:
		return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
	case EXIT_REASON_NMI_WINDOW:
		return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
	case EXIT_REASON_TASK_SWITCH:
		return true;
	case EXIT_REASON_CPUID:
		return true;
	case EXIT_REASON_HLT:
		return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
	case EXIT_REASON_INVD:
		return true;
	case EXIT_REASON_INVLPG:
		return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_RDPMC:
		return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
	case EXIT_REASON_RDRAND:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
	case EXIT_REASON_RDSEED:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
	case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
		return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
	case EXIT_REASON_VMREAD:
		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
			vmcs12->vmread_bitmap);
	case EXIT_REASON_VMWRITE:
		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
			vmcs12->vmwrite_bitmap);
	case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
	case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
		/*
		 * VMX instructions trap unconditionally. This allows L1 to
		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
		 */
		return true;
	case EXIT_REASON_CR_ACCESS:
		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
	case EXIT_REASON_DR_ACCESS:
		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
	case EXIT_REASON_IO_INSTRUCTION:
		return nested_vmx_exit_handled_io(vcpu, vmcs12);
	case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
	case EXIT_REASON_INVALID_STATE:
		return true;
	case EXIT_REASON_MWAIT_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
	case EXIT_REASON_MONITOR_TRAP_FLAG:
		return nested_vmx_exit_handled_mtf(vmcs12);
	case EXIT_REASON_MONITOR_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
	case EXIT_REASON_PAUSE_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
			nested_cpu_has2(vmcs12,
				SECONDARY_EXEC_PAUSE_LOOP_EXITING);
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return true;
	case EXIT_REASON_TPR_BELOW_THRESHOLD:
		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
	case EXIT_REASON_APIC_ACCESS:
	case EXIT_REASON_APIC_WRITE:
	case EXIT_REASON_EOI_INDUCED:
		/*
		 * The controls for "virtualize APIC accesses," "APIC-
		 * register virtualization," and "virtual-interrupt
		 * delivery" only come from vmcs12.
		 */
		return true;
	case EXIT_REASON_INVPCID:
		return
			nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
			nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_WBINVD:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
	case EXIT_REASON_XSETBV:
		return true;
	case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
		/*
		 * This should never happen, since it is not possible to
		 * set XSS to a non-zero value---neither in L1 nor in L2.
		 * If it were, XSS would have to be checked against
		 * the XSS exit bitmap in vmcs12.
		 */
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
	case EXIT_REASON_UMWAIT:
	case EXIT_REASON_TPAUSE:
		return nested_cpu_has2(vmcs12,
			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
	case EXIT_REASON_ENCLS:
		return nested_vmx_exit_handled_encls(vcpu, vmcs12);
	default:
		return true;
	}
}
/*
 * Conditionally reflect a VM-Exit into L1.  Returns %true if the VM-Exit was
 * reflected into L1.
 */
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	union vmx_exit_reason exit_reason = vmx->exit_reason;
	unsigned long exit_qual;
	u32 exit_intr_info;

	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	/*
	 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
	 * has already loaded L2's state.
	 */
	if (unlikely(vmx->fail)) {
		trace_kvm_nested_vmenter_failed(
			"hardware VM-instruction error: ",
			vmcs_read32(VM_INSTRUCTION_ERROR));
		exit_intr_info = 0;
		exit_qual = 0;
		goto reflect_vmexit;
	}

	trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);

	/* If L0 (KVM) wants the exit, it trumps L1's desires. */
	if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
		return false;

	/* If L1 doesn't want the exit, handle it in L0. */
	if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
		return false;

	/*
	 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits.  For
	 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
	 * need to be synthesized by querying the in-kernel LAPIC, but external
	 * interrupts are never reflected to L1 so it's a non-issue.
	 */
	exit_intr_info = vmx_get_intr_info(vcpu);
	if (is_exception_with_error_code(exit_intr_info)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	}
	exit_qual = vmx_get_exit_qual(vcpu);

reflect_vmexit:
	nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
	return true;
}
static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_vmx *vmx;
	struct vmcs12 *vmcs12;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_VMX,
		.size = sizeof(kvm_state),
		.hdr.vmx.flags = 0,
		.hdr.vmx.vmxon_pa = -1ull,
		.hdr.vmx.vmcs12_pa = -1ull,
		.hdr.vmx.preemption_timer_deadline = 0,
	};
	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
		&user_kvm_nested_state->data.vmx[0];

	if (!vcpu)
		return kvm_state.size + sizeof(*user_vmx_nested_state);

	vmx = to_vmx(vcpu);
	vmcs12 = get_vmcs12(vcpu);

	if (nested_vmx_allowed(vcpu) &&
	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;

		if (vmx_has_valid_vmcs12(vcpu)) {
			kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);

			/* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
			if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
				kvm_state.flags |= KVM_STATE_NESTED_EVMCS;

			if (is_guest_mode(vcpu) &&
			    nested_cpu_has_shadow_vmcs(vmcs12) &&
			    vmcs12->vmcs_link_pointer != -1ull)
				kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
		}

		if (vmx->nested.smm.vmxon)
			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;

		if (vmx->nested.smm.guest_mode)
			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;

		if (is_guest_mode(vcpu)) {
			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

			if (vmx->nested.nested_run_pending)
				kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;

			if (vmx->nested.mtf_pending)
				kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;

			if (nested_cpu_has_preemption_timer(vmcs12) &&
			    vmx->nested.has_preemption_timer_deadline) {
				kvm_state.hdr.vmx.flags |=
					KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
				kvm_state.hdr.vmx.preemption_timer_deadline =
					vmx->nested.preemption_timer_deadline;
			}
		}
	}

	if (user_data_size < kvm_state.size)
		goto out;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!vmx_has_valid_vmcs12(vcpu))
		goto out;

	/*
	 * When running L2, the authoritative vmcs12 state is in the
	 * vmcs02. When running L1, the authoritative vmcs12 state is
	 * in the shadow or enlightened vmcs linked to vmcs01, unless
	 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
	 * vmcs12 state is in the vmcs12 already.
	 */
	if (is_guest_mode(vcpu)) {
		sync_vmcs02_to_vmcs12(vcpu, vmcs12);
		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
	} else {
		copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
		if (!vmx->nested.need_vmcs12_to_shadow_sync) {
			if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
				/*
				 * L1 hypervisor is not obliged to keep eVMCS
				 * clean fields data always up-to-date while
				 * not in guest mode, 'hv_clean_fields' is only
				 * supposed to be actual upon vmentry so we need
				 * to ignore it here and do full copy.
				 */
				copy_enlightened_to_vmcs12(vmx, 0);
			else if (enable_shadow_vmcs)
				copy_shadow_to_vmcs12(vmx);
		}
	}

	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
	BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);

	/*
	 * Copy over the full allocated size of vmcs12 rather than just the size
	 * of the struct.
	 */
	if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
		return -EFAULT;

	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
			return -EFAULT;
	}
out:
	return kvm_state.size;
}
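/*
 * Resulting layout, for illustration: a vCPU in guest mode with a valid
 * vmcs12 and an in-use shadow VMCS reports kvm_state.size =
 * sizeof(struct kvm_nested_state) + 2 * VMCS12_SIZE, with the two 4K
 * regions landing in user_vmx_nested_state->vmcs12 and ->shadow_vmcs12.
 */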
/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu)) {
		to_vmx(vcpu)->nested.nested_run_pending = 0;
		nested_vmx_vmexit(vcpu, -1, 0, 0);
	}
	free_nested(vcpu);
}
static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	enum vm_entry_failure_code ignored;
	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
		&user_kvm_nested_state->data.vmx[0];
	int ret;

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
		return -EINVAL;

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
		if (kvm_state->hdr.vmx.smm.flags)
			return -EINVAL;

		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
			return -EINVAL;

		/*
		 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
		 * enable eVMCS capability on vCPU. However, since then
		 * code was changed such that flag signals vmcs12 should
		 * be copied into eVMCS in guest memory.
		 *
		 * To preserve backwards compatibility, allow user
		 * to set this flag even when there is no VMXON region.
		 */
		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
			return -EINVAL;
	} else {
		if (!nested_vmx_allowed(vcpu))
			return -EINVAL;

		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
			return -EINVAL;
	}

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (kvm_state->hdr.vmx.smm.flags &
	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
		return -EINVAL;

	/*
	 * SMM temporarily disables VMX, so we cannot be in guest mode,
	 * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
	 * must be zero.
	 */
	if (is_smm(vcpu) ?
		(kvm_state->flags &
		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
		: kvm_state->hdr.vmx.smm.flags)
		return -EINVAL;

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
	    (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
		return -EINVAL;

	vmx_leave_nested(vcpu);

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
		return 0;

	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	/* Empty 'VMXON' state is permitted if no VMCS loaded */
	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
		/* See vmx_has_valid_vmcs12. */
		if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
		    (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
		    (kvm_state->hdr.vmx.vmcs12_pa != -1ull))
			return -EINVAL;
		else
			return 0;
	}

	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
			return -EINVAL;

		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		/*
		 * nested_vmx_handle_enlightened_vmptrld() cannot be called
		 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
		 * restored yet. EVMCS will be mapped from
		 * nested_get_vmcs12_pages().
		 */
		vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	} else {
		return -EINVAL;
	}

	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
		vmx->nested.smm.vmxon = true;
		vmx->nested.vmxon = false;

		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
			vmx->nested.smm.guest_mode = true;
	}

	vmcs12 = get_vmcs12(vcpu);
	if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
		return -EFAULT;

	if (vmcs12->hdr.revision_id != VMCS12_REVISION)
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return 0;

	vmx->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	vmx->nested.mtf_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);

	ret = -EINVAL;
	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);

		if (kvm_state->size <
		    sizeof(*kvm_state) +
		    sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
			goto error_guest_mode;

		if (copy_from_user(shadow_vmcs12,
				   user_vmx_nested_state->shadow_vmcs12,
				   sizeof(*shadow_vmcs12))) {
			ret = -EFAULT;
			goto error_guest_mode;
		}

		if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    !shadow_vmcs12->hdr.shadow_vmcs)
			goto error_guest_mode;
	}

	vmx->nested.has_preemption_timer_deadline = false;
	if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
		vmx->nested.has_preemption_timer_deadline = true;
		vmx->nested.preemption_timer_deadline =
			kvm_state->hdr.vmx.preemption_timer_deadline;
	}

	if (nested_vmx_check_controls(vcpu, vmcs12) ||
	    nested_vmx_check_host_state(vcpu, vmcs12) ||
	    nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
		goto error_guest_mode;

	vmx->nested.dirty_vmcs12 = true;
	ret = nested_vmx_enter_non_root_mode(vcpu, false);
	if (ret)
		goto error_guest_mode;

	return 0;

error_guest_mode:
	vmx->nested.nested_run_pending = 0;
	return ret;
}
void nested_vmx_set_vmcs_shadowing_bitmap(void)
{
	if (enable_shadow_vmcs) {
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}
}
/*
 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo
 * that madness to get the encoding for comparison.
 */
#define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
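/*
 * Worked example: GUEST_ES_SELECTOR (encoding 0x0800, bit 11) is stored
 * at table index ROL16(0x0800, 6) = 0x0002, and VMCS12_IDX_TO_ENC(0x0002)
 * = (0x0002 >> 6) | (0x0002 << 10) = 0x0800 recovers the encoding.
 */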
static u64 nested_vmx_calc_vmcs_enum_msr(void)
{
	/*
	 * Note these are the so called "index" of the VMCS field encoding, not
	 * the index into vmcs12.
	 */
	unsigned int max_idx = 0, idx;
	int i;

	/*
	 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
	 * vmcs12, regardless of whether or not the associated feature is
	 * exposed to L1.  Simply find the field with the highest index.
	 */
	for (i = 0; i < nr_vmcs12_fields; i++) {
		/* The vmcs12 table is very, very sparsely populated. */
		if (!vmcs_field_to_offset_table[i])
			continue;

		idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
		if (idx > max_idx)
			max_idx = idx;
	}

	return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
}
/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 */
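/*
 * In other words, a vmcs12 control value passes vmx_control_verify() iff
 * every must-be-1 (low) bit is set and no bit outside the may-be-1 (high)
 * mask is set, which reduces to:
 *
 *	((control & high) | low) == control
 */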
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
{
	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits. The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
	 * fields of vmcs01 and vmcs02, will turn these bits off - and
	 * nested_vmx_l1_wants_exit() will not pass related exits to L1.
	 * These rules have exceptions below.
	 */

	/* pin-based controls */
	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
	      msrs->pinbased_ctls_low,
	      msrs->pinbased_ctls_high);
	msrs->pinbased_ctls_low |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS |
		(enable_apicv ? PIN_BASED_POSTED_INTR : 0);
	msrs->pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;

	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
	      msrs->exit_ctls_low,
	      msrs->exit_ctls_high);
	msrs->exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
		VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
	msrs->exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	/* We support free control of debug control saving. */
	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;

	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
	      msrs->entry_ctls_low,
	      msrs->entry_ctls_high);
	msrs->entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
		VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
	msrs->entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);

	/* We support free control of debug control loading. */
	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;

	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
	      msrs->procbased_ctls_low,
	      msrs->procbased_ctls_high);
	msrs->procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->procbased_ctls_high &=
		CPU_BASED_INTR_WINDOW_EXITING |
		CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	msrs->procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	msrs->procbased_ctls_low &=
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);

	/*
	 * secondary cpu-based controls.  Do not include those that
	 * depend on CPUID bits, they are added later by
	 * vmx_vcpu_after_set_cpuid.
	 */
	if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      msrs->secondary_ctls_low,
		      msrs->secondary_ctls_high);

	msrs->secondary_ctls_low = 0;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_ENABLE_RDTSCP |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_WBINVD_EXITING |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_RDRAND_EXITING |
		SECONDARY_EXEC_ENABLE_INVPCID |
		SECONDARY_EXEC_RDSEED_EXITING |
		SECONDARY_EXEC_XSAVES |
		SECONDARY_EXEC_TSC_SCALING;

	/*
	 * We can emulate "VMCS shadowing," even if the hardware
	 * doesn't support it.
	 */
	msrs->secondary_ctls_high |=
		SECONDARY_EXEC_SHADOW_VMCS;

	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		msrs->ept_caps =
			VMX_EPT_PAGE_WALK_4_BIT |
			VMX_EPT_PAGE_WALK_5_BIT |
			VMX_EPTP_WB_BIT |
			VMX_EPT_INVEPT_BIT |
			VMX_EPT_EXECUTE_ONLY_BIT;

		msrs->ept_caps &= ept_caps;
		msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_1GB_PAGE_BIT;
		if (enable_ept_ad_bits) {
			msrs->secondary_ctls_high |=
				SECONDARY_EXEC_ENABLE_PML;
			msrs->ept_caps |= VMX_EPT_AD_BIT;
		}
	}

	if (cpu_has_vmx_vmfunc()) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VMFUNC;
		/*
		 * Advertise EPTP switching unconditionally
		 * since we emulate it
		 */
		if (enable_ept)
			msrs->vmfunc_controls =
				VMX_VMFUNC_EPTP_SWITCHING;
	}

	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context.  The alternative is
	 * not failing the single-context invvpid, and it is worse.
	 */
	if (enable_vpid) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VPID;
		msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SUPPORTED_MASK;
	}

	if (enable_unrestricted_guest)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	if (flexpriority_enabled)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;

	if (enable_sgx)
		msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;

	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
	      msrs->misc_low,
	      msrs->misc_high);
	msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
	msrs->misc_low |=
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT |
		VMX_MISC_ACTIVITY_WAIT_SIPI;
	msrs->misc_high = 0;

	/*
	 * This MSR reports some information about VMX support. We
	 * should return information about the VMX we emulate for the
	 * guest, and the VMCS structure we give it - not about the
	 * VMX support of the underlying hardware.
	 */
	msrs->basic =
		VMCS12_REVISION |
		VMX_BASIC_TRUE_CTLS |
		((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
		(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);

	if (cpu_has_vmx_basic_inout())
		msrs->basic |= VMX_BASIC_INOUT;
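	/*
	 * Worked example (assuming VMCS12_REVISION's usual value 0x11e57ed0
	 * and ignoring VMX_BASIC_INOUT): the composed value is 0x11e57ed0 |
	 * (1ULL << 55) | (0x1000ULL << 32) | (6ULL << 50) =
	 * 0x0098100011e57ed0, i.e. revision 0x11e57ed0, a 4K VMCS region,
	 * write-back memory type and true-controls support.
	 */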
	/*
	 * These MSRs specify bits which the guest must keep fixed on
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
	msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;

	/* These MSRs specify bits which the guest must keep fixed off. */
	rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
	rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);

	msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
}
void nested_vmx_hardware_unsetup(void)
{
	int i;

	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++)
			free_page((unsigned long)vmx_bitmap[i]);
	}
}
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{
	int i;

	if (!cpu_has_vmx_shadow_vmcs())
		enable_shadow_vmcs = 0;
	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++) {
			/*
			 * The vmx_bitmap is not tied to a VM and so should
			 * not be charged to a memcg.
			 */
			vmx_bitmap[i] = (unsigned long *)
				__get_free_page(GFP_KERNEL);
			if (!vmx_bitmap[i]) {
				nested_vmx_hardware_unsetup();
				return -ENOMEM;
			}
		}

		init_vmcs_shadow_fields();
	}

	exit_handlers[EXIT_REASON_VMCLEAR]	= handle_vmclear;
	exit_handlers[EXIT_REASON_VMLAUNCH]	= handle_vmlaunch;
	exit_handlers[EXIT_REASON_VMPTRLD]	= handle_vmptrld;
	exit_handlers[EXIT_REASON_VMPTRST]	= handle_vmptrst;
	exit_handlers[EXIT_REASON_VMREAD]	= handle_vmread;
	exit_handlers[EXIT_REASON_VMRESUME]	= handle_vmresume;
	exit_handlers[EXIT_REASON_VMWRITE]	= handle_vmwrite;
	exit_handlers[EXIT_REASON_VMOFF]	= handle_vmoff;
	exit_handlers[EXIT_REASON_VMON]		= handle_vmon;
	exit_handlers[EXIT_REASON_INVEPT]	= handle_invept;
	exit_handlers[EXIT_REASON_INVVPID]	= handle_invvpid;
	exit_handlers[EXIT_REASON_VMFUNC]	= handle_vmfunc;

	return 0;
}
struct kvm_x86_nested_ops vmx_nested_ops = {
	.check_events = vmx_check_nested_events,
	.hv_timer_pending = nested_vmx_preemption_timer_pending,
	.triple_fault = nested_vmx_triple_fault,
	.get_state = vmx_get_nested_state,
	.set_state = vmx_set_nested_state,
	.get_nested_state_pages = vmx_get_nested_state_pages,
	.write_log_dirty = nested_vmx_write_pml_buffer,
	.enable_evmcs = nested_enable_evmcs,
	.get_evmcs_version = nested_get_evmcs_version,
};